hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
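Each row pairs a HIP source file (generated from the CUDA file by hipify, per the banner comment) with its original CUDA source; the translation is essentially a mechanical renaming of runtime and library calls. An illustrative sketch of the typical mapping (not taken from any particular row):

    cudaMalloc(&buf, n * sizeof(float));                  ->  hipMalloc(&buf, n * sizeof(float));
    cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice);  ->  hipMemcpy(dst, src, bytes, hipMemcpyHostToDevice);
    kernel<<<grid, block>>>(args);                        ->  hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), 0, 0, args);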
b98f15f89702939052e9452b3bbf10844eeef81a.hip | // !!! This is a file automatically generated by hipify!!!
#include "GravityKernel.cuh"
#include<ctime>
#include<iostream>
#include<stdio.h>
#include<cuda.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define THREADS_PER_BLOCK 64
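// acceleration_kernel: one thread per source particle. Each thread computes the
// gravitational pull of its particle on the reference *particle and writes the
// resulting x/y acceleration components into acc_sum (x at [positionID], y at
// [positionID + NUM_PARTICLES]). The dist2 > 100 check apparently acts as a
// softening cutoff so near-coincident particles contribute nothing.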
__global__ void acceleration_kernel(Particle* particle, float* acc_sum, Particle* particles, int count) {
int positionID = blockDim.x * blockIdx.x + threadIdx.x;
float gConstant = 10;
if (positionID < count) {
Particle ref = particles[positionID];
float distX = ref.position[0] - particle->position[0];
float distY = ref.position[1] - particle->position[1];
float dist2 = distX * distX + distY * distY;
float force = 0;
if (dist2 > 100) {
force = gConstant * ref.mass * particle->mass / (float)dist2;
float rad = atan2(distY, distX);
acc_sum[positionID] = force * cos(rad)/particle->mass;
acc_sum[positionID + Constant::NUM_PARTICLES] = force * sin(rad)/particle->mass;
}
else {
acc_sum[positionID] = 0;
acc_sum[positionID + Constant::NUM_PARTICLES] = 0;
}
}
}
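// sum_kernel: pairwise tree reduction over acc_sum. Each pass halves the number of
// active threads and doubles the stride, accumulating the partial sums into
// acc_sum[0]. It is launched with NUM_PARTICLES/2 threads in a single block, and
// no __syncthreads() separates the passes.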
__global__ void sum_kernel(float* acc_sum) {
const int tid = threadIdx.x;
auto step_size = 1;
int number_of_threads = blockDim.x;
if (tid < Constant::NUM_PARTICLES) {
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
const auto fst = tid * step_size * 2;
const auto snd = fst + step_size;
acc_sum[fst] += acc_sum[snd];
}
step_size <<= 1;
number_of_threads >>= 1;
}
}
}
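// gravity_kernel: one thread per particle. It uses device-side (dynamic parallelism)
// launches of acceleration_kernel and sum_kernel to reduce the particle's net x/y
// acceleration, then performs explicit Euler integration with dt = 0.1 (the drag
// coefficient is 0, so the drag terms are no-ops). Because Particle stores raw
// pointers, writes through the local copy's velocity/position pointers update the
// per-particle device buffers in place.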
__global__ void gravity_kernel(Particle* particles, float* acc_sum, int count) {
float dt = .10;
float drag = 0;
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < count) {
const int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1;
const int threads = Constant::NUM_PARTICLES / 2;
acceleration_kernel << <blocks, THREADS_PER_BLOCK >> > (&particles[id], (acc_sum + 2 * id * Constant::NUM_PARTICLES), particles, count);
sum_kernel << <1, threads >> > ((acc_sum + 2 * id * Constant::NUM_PARTICLES));
sum_kernel << <1, threads >> > ((acc_sum + (2 * id + 1) * Constant::NUM_PARTICLES));
float x = 0, y = 0;
/*for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
x += acc_sum[2 * id * Constant::NUM_PARTICLES];
y += acc_sum[2 * id * Constant::NUM_PARTICLES + Constant::NUM_PARTICLES];
}*/
x = acc_sum[2 * id * Constant::NUM_PARTICLES];
y = acc_sum[(2 * id + 1) * Constant::NUM_PARTICLES];
Particle ref = particles[id];
float* vel = ref.velocity;
float* pos = ref.position;
vel[0] += x * dt;
vel[1] += y * dt;
vel[0] -= vel[0] * dt * drag;
vel[1] -= vel[1] * dt * drag;
pos[0] += vel[0] * dt;
pos[1] += vel[1] * dt;
}
}
GravityKernel::GravityKernel() {
this->_world = new World();
}
GravityKernel::GravityKernel(World* world) {
this->_world = world;
}
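// cudaPrep: allocates the device particle array plus a separate position and
// velocity buffer per particle, patches each device Particle's pointer members to
// reference those buffers (pointer fix-up via memcpy of the pointer values), then
// copies the host positions, velocities and masses across.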
__host__ void GravityKernel::cudaPrep() {
Particle* particles = this->_world->particles;
Particle* d_particles;
float* d_acc_sum;
float** d_positions = new float* [Constant::NUM_PARTICLES];
float** d_velocities = new float* [Constant::NUM_PARTICLES];
if (hipMalloc(&d_particles, sizeof(Particle) * Constant::NUM_PARTICLES) != hipSuccess) {
std::cout << "Particle Device Allocation Error" << std::endl;
return;
}
if (hipMalloc(&d_acc_sum, sizeof(float) * Constant::NUM_PARTICLES * Constant::NUM_PARTICLES * Constant::DIMENSIONS) != hipSuccess) {
std::cout << "Particle Accelerations Allocation Error" << std::endl;
return;
}
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
if (hipMalloc(&(d_positions[i]), sizeof(float) * Constant::DIMENSIONS) != hipSuccess) {
std::cout << "Position Mapping Failure" << std::endl;
}
if (hipMalloc(&(d_velocities[i]), sizeof(float) * Constant::DIMENSIONS) != hipSuccess) {
std::cout << "Velocity Mapping Failure" << std::endl;
}
if (hipMemcpy(&(d_particles[i].position), &(d_positions[i]), sizeof(float*), hipMemcpyHostToDevice) != hipSuccess){
std::cout << "Particle Position Allocation Error" << std::endl;
}
if (hipMemcpy(&(d_particles[i].velocity), &(d_velocities[i]), sizeof(float*), hipMemcpyHostToDevice) != hipSuccess) {
std::cout << "Particle Velocity Allocation Error" << std::endl;
}
}
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
if (hipMemcpy(d_positions[i], particles[i].position, sizeof(float) * Constant::DIMENSIONS, hipMemcpyHostToDevice) != hipSuccess) {
std::cout << "Particle Position Allocation Error" << std::endl;
}
if (hipMemcpy(d_velocities[i], particles[i].velocity, sizeof(float) * Constant::DIMENSIONS, hipMemcpyHostToDevice) != hipSuccess) {
std::cout << "Particle Velocity Allocation Error" << std::endl;
}
if (hipMemcpy(&(d_particles[i].mass), &(particles[i].mass), sizeof(int), hipMemcpyHostToDevice) != hipSuccess) {
std::cout << "Particle Mass Allocation Error" << std::endl;
}
}
this->d_particles = d_particles;
this->d_acc_sum = d_acc_sum;
this->d_positions = d_positions;
this->d_velocities = d_velocities;
}
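// runKernel: launches gravity_kernel over all particles, synchronizes, and copies
// the updated per-particle position/velocity buffers back into the host World.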
__host__ void GravityKernel::runKernel() {
int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1;
hipLaunchKernelGGL(( gravity_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_particles, d_acc_sum, Constant::NUM_PARTICLES);
hipDeviceSynchronize();
Particle* ref = this->_world->particles;
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
hipMemcpy(ref[i].position, d_positions[i], sizeof(float) * Constant::DIMENSIONS, hipMemcpyDeviceToHost);
hipMemcpy(ref[i].velocity, d_velocities[i], sizeof(float) * Constant::DIMENSIONS, hipMemcpyDeviceToHost);
}
}
__host__ void GravityKernel::cudaClear() {
std::cout << "Clearing memory" << std::endl;
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
hipFree(&(d_particles[i].position));
hipFree(&(d_particles[i].velocity));
}
hipFree(d_particles);
}
| b98f15f89702939052e9452b3bbf10844eeef81a.cu | #include "GravityKernel.cuh"
#include<ctime>
#include<iostream>
#include<stdio.h>
#include<cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define THREADS_PER_BLOCK 64
__global__ void acceleration_kernel(Particle* particle, float* acc_sum, Particle* particles, int count) {
int positionID = blockDim.x * blockIdx.x + threadIdx.x;
float gConstant = 10;
if (positionID < count) {
Particle ref = particles[positionID];
float distX = ref.position[0] - particle->position[0];
float distY = ref.position[1] - particle->position[1];
float dist2 = distX * distX + distY * distY;
float force = 0;
if (dist2 > 100) {
force = gConstant * ref.mass * particle->mass / (float)dist2;
float rad = atan2(distY, distX);
acc_sum[positionID] = force * cos(rad)/particle->mass;
acc_sum[positionID + Constant::NUM_PARTICLES] = force * sin(rad)/particle->mass;
}
else {
acc_sum[positionID] = 0;
acc_sum[positionID + Constant::NUM_PARTICLES] = 0;
}
}
}
__global__ void sum_kernel(float* acc_sum) {
const int tid = threadIdx.x;
auto step_size = 1;
int number_of_threads = blockDim.x;
if (tid < Constant::NUM_PARTICLES) {
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
const auto fst = tid * step_size * 2;
const auto snd = fst + step_size;
acc_sum[fst] += acc_sum[snd];
}
step_size <<= 1;
number_of_threads >>= 1;
}
}
}
__global__ void gravity_kernel(Particle* particles, float* acc_sum, int count) {
float dt = .10;
float drag = 0;
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < count) {
const int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1;
const int threads = Constant::NUM_PARTICLES / 2;
acceleration_kernel << <blocks, THREADS_PER_BLOCK >> > (&particles[id], (acc_sum + 2 * id * Constant::NUM_PARTICLES), particles, count);
sum_kernel << <1, threads >> > ((acc_sum + 2 * id * Constant::NUM_PARTICLES));
sum_kernel << <1, threads >> > ((acc_sum + (2 * id + 1) * Constant::NUM_PARTICLES));
float x = 0, y = 0;
/*for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
x += acc_sum[2 * id * Constant::NUM_PARTICLES];
y += acc_sum[2 * id * Constant::NUM_PARTICLES + Constant::NUM_PARTICLES];
}*/
x = acc_sum[2 * id * Constant::NUM_PARTICLES];
y = acc_sum[(2 * id + 1) * Constant::NUM_PARTICLES];
Particle ref = particles[id];
float* vel = ref.velocity;
float* pos = ref.position;
vel[0] += x * dt;
vel[1] += y * dt;
vel[0] -= vel[0] * dt * drag;
vel[1] -= vel[1] * dt * drag;
pos[0] += vel[0] * dt;
pos[1] += vel[1] * dt;
}
}
GravityKernel::GravityKernel() {
this->_world = new World();
}
GravityKernel::GravityKernel(World* world) {
this->_world = world;
}
__host__ void GravityKernel::cudaPrep() {
Particle* particles = this->_world->particles;
Particle* d_particles;
float* d_acc_sum;
float** d_positions = new float* [Constant::NUM_PARTICLES];
float** d_velocities = new float* [Constant::NUM_PARTICLES];
if (cudaMalloc(&d_particles, sizeof(Particle) * Constant::NUM_PARTICLES) != cudaSuccess) {
std::cout << "Particle Device Allocation Error" << std::endl;
return;
}
if (cudaMalloc(&d_acc_sum, sizeof(float) * Constant::NUM_PARTICLES * Constant::NUM_PARTICLES * Constant::DIMENSIONS) != cudaSuccess) {
std::cout << "Particle Accelerations Allocation Error" << std::endl;
return;
}
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
if (cudaMalloc(&(d_positions[i]), sizeof(float) * Constant::DIMENSIONS) != cudaSuccess) {
std::cout << "Position Mapping Failure" << std::endl;
}
if (cudaMalloc(&(d_velocities[i]), sizeof(float) * Constant::DIMENSIONS) != cudaSuccess) {
std::cout << "Velocity Mapping Failure" << std::endl;
}
if (cudaMemcpy(&(d_particles[i].position), &(d_positions[i]), sizeof(float*), cudaMemcpyHostToDevice) != cudaSuccess){
std::cout << "Particle Position Allocation Error" << std::endl;
}
if (cudaMemcpy(&(d_particles[i].velocity), &(d_velocities[i]), sizeof(float*), cudaMemcpyHostToDevice) != cudaSuccess) {
std::cout << "Particle Velocity Allocation Error" << std::endl;
}
}
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
if (cudaMemcpy(d_positions[i], particles[i].position, sizeof(float) * Constant::DIMENSIONS, cudaMemcpyHostToDevice) != cudaSuccess) {
std::cout << "Particle Position Allocation Error" << std::endl;
}
if (cudaMemcpy(d_velocities[i], particles[i].velocity, sizeof(float) * Constant::DIMENSIONS, cudaMemcpyHostToDevice) != cudaSuccess) {
std::cout << "Particle Velocity Allocation Error" << std::endl;
}
if (cudaMemcpy(&(d_particles[i].mass), &(particles[i].mass), sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
std::cout << "Particle Mass Allocation Error" << std::endl;
}
}
this->d_particles = d_particles;
this->d_acc_sum = d_acc_sum;
this->d_positions = d_positions;
this->d_velocities = d_velocities;
}
__host__ void GravityKernel::runKernel() {
int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1;
gravity_kernel<<<blocks, THREADS_PER_BLOCK>>>(d_particles, d_acc_sum, Constant::NUM_PARTICLES);
cudaDeviceSynchronize();
Particle* ref = this->_world->particles;
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
cudaMemcpy(ref[i].position, d_positions[i], sizeof(float) * Constant::DIMENSIONS, cudaMemcpyDeviceToHost);
cudaMemcpy(ref[i].velocity, d_velocities[i], sizeof(float) * Constant::DIMENSIONS, cudaMemcpyDeviceToHost);
}
}
__host__ void GravityKernel::cudaClear() {
std::cout << "Clearing memory" << std::endl;
for (int i = 0; i < Constant::NUM_PARTICLES; i++) {
cudaFree(&(d_particles[i].position));
cudaFree(&(d_particles[i].velocity));
}
cudaFree(d_particles);
}
|
4ec152e56ec5e38714c0d66a8082d6401076d8fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "linAlg/matrix.h"
Matrix<float>& matMul(hipblasHandle_t handle, const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& out, bool transA, bool transB) {
auto opA = transA?HIPBLAS_OP_T:HIPBLAS_OP_N;
auto opB = transB?HIPBLAS_OP_T:HIPBLAS_OP_N;
float alpha = 1.0f;
float beta = 0.0f;
auto err = hipblasSgemm(handle, opA, opB, transA?A.getM():A.getN(), transB?B.getN():B.getM(), transA?A.getN():A.getM(),
&alpha, A.getData(), A.getN(), B.getData(), B.getN(), &beta, out.getData(), out.getN());
hipDeviceSynchronize();
if (err) {
std::cout << "Mat mul error: " << err << std::endl;
throw err;
}
return out;
}
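// matMul_Add: same GEMM, but out is first seeded with C and beta is set to 1, so the
// product accumulates onto C: out = op(A) * op(B) + C.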
Matrix<float>& matMul_Add(hipblasHandle_t handle, const Matrix<float>& A, const Matrix<float>& B, const Matrix<float>& C, Matrix<float>& out, bool transA, bool transB) {
float alpha = 1.0f;
auto opA = transA?HIPBLAS_OP_T:HIPBLAS_OP_N;
auto opB = transB?HIPBLAS_OP_T:HIPBLAS_OP_N;
out.gpuSetValues(C.getData());
auto err = hipblasSgemm(handle, opA, opB, transA?A.getM():A.getN(), transB?B.getN():B.getM(), transA?A.getN():A.getM(),
&alpha, A.getData(), A.getN(), B.getData(), B.getN(), &alpha, out.getData(), out.getN());
hipDeviceSynchronize();
if (err) {
std::cout << "Mat mul add error: " << err << std::endl;
throw err;
}
return out;
}
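// add: out += alpha * A over the flattened N*M elements, via hipblasSaxpy.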
Matrix<float>& add(hipblasHandle_t handle, const Matrix<float>& A, Matrix<float>& out, float alpha) {
auto err = hipblasSaxpy(handle, A.getN()*A.getM(), &alpha, A.getData(), 1, out.getData(), 1);
hipDeviceSynchronize();
if (err) {
std::cout << "add error: " << err << std::endl;
throw err;
}
return out;
}
| 4ec152e56ec5e38714c0d66a8082d6401076d8fb.cu | #include "linAlg/matrix.h"
Matrix<float>& matMul(cublasHandle_t handle, const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& out, bool transA, bool transB) {
auto opA = transA?CUBLAS_OP_T:CUBLAS_OP_N;
auto opB = transB?CUBLAS_OP_T:CUBLAS_OP_N;
float alpha = 1.0f;
float beta = 0.0f;
auto err = cublasSgemm(handle, opA, opB, transA?A.getM():A.getN(), transB?B.getN():B.getM(), transA?A.getN():A.getM(),
&alpha, A.getData(), A.getN(), B.getData(), B.getN(), &beta, out.getData(), out.getN());
cudaDeviceSynchronize();
if (err) {
std::cout << "Mat mul error: " << err << std::endl;
throw err;
}
return out;
}
Matrix<float>& matMul_Add(cublasHandle_t handle, const Matrix<float>& A, const Matrix<float>& B, const Matrix<float>& C, Matrix<float>& out, bool transA, bool transB) {
float alpha = 1.0f;
auto opA = transA?CUBLAS_OP_T:CUBLAS_OP_N;
auto opB = transB?CUBLAS_OP_T:CUBLAS_OP_N;
out.gpuSetValues(C.getData());
auto err = cublasSgemm(handle, opA, opB, transA?A.getM():A.getN(), transB?B.getN():B.getM(), transA?A.getN():A.getM(),
&alpha, A.getData(), A.getN(), B.getData(), B.getN(), &alpha, out.getData(), out.getN());
cudaDeviceSynchronize();
if (err) {
std::cout << "Mat mul add error: " << err << std::endl;
throw err;
}
return out;
}
Matrix<float>& add(cublasHandle_t handle, const Matrix<float>& A, Matrix<float>& out, float alpha) {
auto err = cublasSaxpy(handle, A.getN()*A.getM(), &alpha, A.getData(), 1, out.getData(), 1);
cudaDeviceSynchronize();
if (err) {
std::cout << "add error: " << err << std::endl;
throw err;
}
return out;
}
|
2af435d5e9aab7212814a02d59e2cbdd16941d37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements Mersenne Twister random number generator
* and Cartesian Box-Muller transformation on the GPU.
* See supplied whitepaper for more explanations.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <cutil_inline.h>
///////////////////////////////////////////////////////////////////////////////
// Common host and device function
///////////////////////////////////////////////////////////////////////////////
//ceil(a / b)
extern "C" int iDivUp(int a, int b){
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
//floor(a / b)
extern "C" int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
extern "C" int iAlignUp(int a, int b){
return ((a % b) != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
extern "C" int iAlignDown(int a, int b){
return a - a % b;
}
///////////////////////////////////////////////////////////////////////////////
// Reference MT front-end and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed);
extern "C" void BoxMullerRef(float *h_Random, int NPerRng);
///////////////////////////////////////////////////////////////////////////////
// Fast GPU random number generator and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
#include "MersenneTwister_kernel.cu"
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
const int PATH_N = 24000000;
const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2);
const int RAND_N = MT_RNG_COUNT * N_PER_RNG;
const unsigned int SEED = 777;
//#define DO_BOXMULLER
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
float
*d_Rand;
float
*h_RandCPU,
*h_RandGPU;
double
rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime;
int i, j;
unsigned int hTimer;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Initializing data for %i samples...\n", PATH_N);
h_RandCPU = (float *)malloc(RAND_N * sizeof(float));
h_RandGPU = (float *)malloc(RAND_N * sizeof(float));
cutilSafeCall( hipMalloc((void **)&d_Rand, RAND_N * sizeof(float)) );
printf("Loading CPU and GPU twisters configurations...\n");
const char *raw_path = cutFindFilePath("MersenneTwister.raw", argv[0]);
const char *dat_path = cutFindFilePath("MersenneTwister.dat", argv[0]);
initMTRef(raw_path);
loadMTGPU(dat_path);
seedMTGPU(SEED);
printf("Generating random numbers on GPU...\n");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
hipLaunchKernelGGL(( RandomGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG);
cutilCheckMsg("RandomGPU() execution failed\n");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer);
printf("Generated samples : %i \n", RAND_N);
printf("RandomGPU() time : %f \n", gpuTime);
printf("Samples per second: %E \n", RAND_N / (gpuTime * 0.001));
#ifdef DO_BOXMULLER
printf("Applying Box-Muller transformation on GPU...\n");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
hipLaunchKernelGGL(( BoxMullerGPU), dim3(32), dim3(128), 0, 0, d_Rand, N_PER_RNG);
cutilCheckMsg("BoxMullerGPU() execution failed\n");
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer);
printf("Transformed samples : %i \n", RAND_N);
printf("BoxMullerGPU() time : %f \n", gpuTime);
printf("Samples per second : %E \n", RAND_N / (gpuTime * 0.001));
#endif
printf("Reading back the results...\n");
cutilSafeCall( hipMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), hipMemcpyDeviceToHost) );
printf("Checking GPU results...\n");
printf("...generating random numbers on CPU using reference generator\n");
RandomRef(h_RandCPU, N_PER_RNG, SEED);
#ifdef DO_BOXMULLER
printf("...applying Box-Muller transformation on CPU\n");
BoxMullerRef(h_RandCPU, N_PER_RNG);
#endif
printf("...comparing the results\n");
max_delta = 0;
sum_delta = 0;
sum_ref = 0;
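// The reference generator lays out each RNG's N_PER_RNG samples contiguously, while
// the GPU kernel interleaves one sample per RNG per step, hence the transposed
// indexing (i * N_PER_RNG + j vs. i + j * MT_RNG_COUNT) in the comparison below.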
for(i = 0; i < MT_RNG_COUNT; i++)
for(j = 0; j < N_PER_RNG; j++){
rCPU = h_RandCPU[i * N_PER_RNG + j];
rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
delta = fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += fabs(rCPU);
if(delta >= max_delta) max_delta = delta;
}
L1norm = (float)(sum_delta / sum_ref);
printf("Max absolute error: %E\n", max_delta);
printf("L1 norm: %E\n", L1norm);
printf((L1norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilSafeCall( hipFree(d_Rand) );
free(h_RandGPU);
free(h_RandCPU);
cutilCheckError( cutDeleteTimer( hTimer) );
hipDeviceReset();
cutilExit(argc, argv);
}
| 2af435d5e9aab7212814a02d59e2cbdd16941d37.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/*
* This sample implements Mersenne Twister random number generator
* and Cartesian Box-Muller transformation on the GPU.
* See supplied whitepaper for more explanations.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <cutil_inline.h>
///////////////////////////////////////////////////////////////////////////////
// Common host and device function
///////////////////////////////////////////////////////////////////////////////
//ceil(a / b)
extern "C" int iDivUp(int a, int b){
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
//floor(a / b)
extern "C" int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
extern "C" int iAlignUp(int a, int b){
return ((a % b) != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
extern "C" int iAlignDown(int a, int b){
return a - a % b;
}
///////////////////////////////////////////////////////////////////////////////
// Reference MT front-end and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
extern "C" void initMTRef(const char *fname);
extern "C" void RandomRef(float *h_Random, int NPerRng, unsigned int seed);
extern "C" void BoxMullerRef(float *h_Random, int NPerRng);
///////////////////////////////////////////////////////////////////////////////
// Fast GPU random number generator and Box-Muller transform
///////////////////////////////////////////////////////////////////////////////
#include "MersenneTwister_kernel.cu"
///////////////////////////////////////////////////////////////////////////////
// Data configuration
///////////////////////////////////////////////////////////////////////////////
const int PATH_N = 24000000;
const int N_PER_RNG = iAlignUp(iDivUp(PATH_N, MT_RNG_COUNT), 2);
const int RAND_N = MT_RNG_COUNT * N_PER_RNG;
const unsigned int SEED = 777;
//#define DO_BOXMULLER
///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
float
*d_Rand;
float
*h_RandCPU,
*h_RandGPU;
double
rCPU, rGPU, delta, sum_delta, max_delta, sum_ref, L1norm, gpuTime;
int i, j;
unsigned int hTimer;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Initializing data for %i samples...\n", PATH_N);
h_RandCPU = (float *)malloc(RAND_N * sizeof(float));
h_RandGPU = (float *)malloc(RAND_N * sizeof(float));
cutilSafeCall( cudaMalloc((void **)&d_Rand, RAND_N * sizeof(float)) );
printf("Loading CPU and GPU twisters configurations...\n");
const char *raw_path = cutFindFilePath("MersenneTwister.raw", argv[0]);
const char *dat_path = cutFindFilePath("MersenneTwister.dat", argv[0]);
initMTRef(raw_path);
loadMTGPU(dat_path);
seedMTGPU(SEED);
printf("Generating random numbers on GPU...\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
RandomGPU<<<32, 128>>>(d_Rand, N_PER_RNG);
cutilCheckMsg("RandomGPU() execution failed\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer);
printf("Generated samples : %i \n", RAND_N);
printf("RandomGPU() time : %f \n", gpuTime);
printf("Samples per second: %E \n", RAND_N / (gpuTime * 0.001));
#ifdef DO_BOXMULLER
printf("Applying Box-Muller transformation on GPU...\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
BoxMullerGPU<<<32, 128>>>(d_Rand, N_PER_RNG);
cutilCheckMsg("BoxMullerGPU() execution failed\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer);
printf("Transformed samples : %i \n", RAND_N);
printf("BoxMullerGPU() time : %f \n", gpuTime);
printf("Samples per second : %E \n", RAND_N / (gpuTime * 0.001));
#endif
printf("Reading back the results...\n");
cutilSafeCall( cudaMemcpy(h_RandGPU, d_Rand, RAND_N * sizeof(float), cudaMemcpyDeviceToHost) );
printf("Checking GPU results...\n");
printf("...generating random numbers on CPU using reference generator\n");
RandomRef(h_RandCPU, N_PER_RNG, SEED);
#ifdef DO_BOXMULLER
printf("...applying Box-Muller transformation on CPU\n");
BoxMullerRef(h_RandCPU, N_PER_RNG);
#endif
printf("...comparing the results\n");
max_delta = 0;
sum_delta = 0;
sum_ref = 0;
for(i = 0; i < MT_RNG_COUNT; i++)
for(j = 0; j < N_PER_RNG; j++){
rCPU = h_RandCPU[i * N_PER_RNG + j];
rGPU = h_RandGPU[i + j * MT_RNG_COUNT];
delta = fabs(rCPU - rGPU);
sum_delta += delta;
sum_ref += fabs(rCPU);
if(delta >= max_delta) max_delta = delta;
}
L1norm = (float)(sum_delta / sum_ref);
printf("Max absolute error: %E\n", max_delta);
printf("L1 norm: %E\n", L1norm);
printf((L1norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilSafeCall( cudaFree(d_Rand) );
free(h_RandGPU);
free(h_RandCPU);
cutilCheckError( cutDeleteTimer( hTimer) );
cudaThreadExit();
cutilExit(argc, argv);
}
|
11fbf88069b182b3f97b7d7bbc42c60b43e1f86e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <hip/hip_complex.h>
#include "rocblas.h"
#include <hip/hip_runtime.h>
#include "GMRES.h"
extern "C" HOST void printMatrix(cuFloatComplex *A, const int row, const int col,
const int lda)
{
float x, y;
int i, j;
for (i=0;i<row;i++) {
for (j=0;j<col;j++) {
x = cuCrealf(A[IDXC0(i,j,lda)]);
y = cuCimagf(A[IDXC0(i,j,lda)]);
printf("(%f,%f) ",x,y);
}
printf("\n");
}
}
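// Rsolver: back-substitution for the upper-triangular system R x = b (IDXC0 indexing,
// presumably zero-based column-major, defined in GMRES.h).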
extern "C" HOST void Rsolver(const cuFloatComplex *R, const cuFloatComplex *b,
const int m, cuFloatComplex *x)
{
int i, j;
cuFloatComplex temp[6];
for(i=m-1;i>=0;i--) {
temp[0] = b[i];
temp[1] = R[IDXC0(i,i,m)];
for(j=m-1;j>i;j--) {
temp[2] = x[j];
temp[3] = R[IDXC0(i,j,m)];
temp[4] = cuCmulf(temp[2],temp[3]);
temp[0] = cuCsubf(temp[0],temp[4]);
}
x[i] = cuCdivf(temp[0],temp[1]);
}
}
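// givens_coeffs: builds the complex Givens rotation (c, s) that eliminates sigma
// against rho, using the conjugates of rho and sigma normalized by
// sqrt(|rho|^2 + |sigma|^2).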
extern "C" HOST void givens_coeffs(const cuFloatComplex rho,
const cuFloatComplex sigma, cuFloatComplex *c, cuFloatComplex *s)
{
cuFloatComplex rho_b = cuConjf(rho), sigma_b = cuConjf(sigma);
float x, y, mag;
mag = sqrt(pow(cuCabsf(rho),2)+pow(cuCabsf(sigma),2));
x = cuCrealf(rho_b)/mag;
y = cuCimagf(rho_b)/mag;
*c = make_cuFloatComplex(x,y);
x = cuCrealf(sigma_b)/mag;
y = cuCimagf(sigma_b)/mag;
*s = make_cuFloatComplex(x,y);
}
extern "C" HOST void apply_givens(const int m, const int k, cuFloatComplex *c,
cuFloatComplex *s, cuFloatComplex *h)
{
cuFloatComplex c_k, s_k, c_b, s_b, temp[7];
float x, y;
int i;
for(i=0;i<k-1;i++) {
temp[0] = cuCmulf(c[i],h[i]);
temp[1] = cuCmulf(s[i],h[i+1]);
temp[2] = cuCaddf(temp[0],temp[1]);
c_b = cuConjf(c[i]);
s_b = cuConjf(s[i]);
x = cuCrealf(s_b);
y = cuCimagf(s_b);
temp[3] = make_cuFloatComplex(-x,-y);
temp[4] = cuCmulf(temp[3],h[i]);
temp[5] = cuCmulf(c_b,h[i+1]);
temp[6] = cuCaddf(temp[4],temp[5]);
h[i+1] = temp[6];
h[i] = temp[2];
}
if(k<m) {
givens_coeffs(h[k-1],h[k],&c_k,&s_k);
c[k-1] = c_k;
s[k-1] = s_k;
temp[0] = cuCmulf(c_k,h[k-1]);
temp[1] = cuCmulf(s_k,h[k]);
h[k-1] = cuCaddf(temp[0],temp[1]);
h[k] = make_cuFloatComplex(0,0);
}
}
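// arnoldi: one Arnoldi step on the GPU. It forms y = A * q_{k-1} with hipblasCgemv,
// orthogonalizes y against the first k basis vectors (modified Gram-Schmidt: each
// hipblasCdotc result fills an entry of column k-1 of H and is followed by an axpy
// subtraction), then normalizes the remainder into basis vector q_k when k < m.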
extern "C" HOST int arnoldi(const cuFloatComplex *A_h, const int k, const int m,
cuFloatComplex *Q_h, cuFloatComplex *H_h)
{
if(k>m) {
printf("Error with input k.\n");
return EXIT_FAILURE;
}
float v_real, v_imag;
int i;
float nrm;
cuFloatComplex alpha, beta, prod;
alpha = make_cuFloatComplex(1,0);
beta = make_cuFloatComplex(0,0);
cuFloatComplex *A_d, *q_d, *y_d;
hipblasHandle_t handle;
CUBLAS_CALL(hipblasCreate(&handle));
CUDA_CALL(hipMalloc(&A_d,m*m*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(A_d,A_h,m*m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&q_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(q_d,&Q_h[IDXC0(0,k-1,m)],m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&y_d,m*sizeof(cuFloatComplex)));
CUBLAS_CALL(hipblasCgemv_v2(handle,HIPBLAS_OP_N,m,m,&alpha,A_d,m,q_d,1,&beta,
y_d,1)); //Aq
CUDA_CALL(hipFree(A_d));
for(i=1;i<=k;i++) {
CUDA_CALL(hipMemcpy(q_d,&Q_h[IDXC0(0,i-1,m)],m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUBLAS_CALL(hipblasCdotc_v2(handle,m,q_d,1,y_d,1,&prod));
H_h[IDXC0(i-1,k-1,m)] = prod;
v_real = cuCrealf(prod);
v_imag = cuCimagf(prod);
alpha = make_cuFloatComplex(-v_real,-v_imag);
CUBLAS_CALL(hipblasCaxpy_v2(handle,m,&alpha,q_d,1,y_d,1));
}
CUBLAS_CALL(hipblasScnrm2_v2(handle,m,y_d,1,&nrm));
if(k<m) {
H_h[IDXC0(k,k-1,m)] = make_cuFloatComplex(nrm,0);
alpha = make_cuFloatComplex(1.0/nrm,0);
CUDA_CALL(hipMemset(q_d,0,m*sizeof(cuFloatComplex)));
CUBLAS_CALL(hipblasCaxpy_v2(handle,m,&alpha,y_d,1,q_d,1));
CUDA_CALL(hipMemcpy(&Q_h[IDXC0(0,k,m)],q_d,m*sizeof(cuFloatComplex),
hipMemcpyDeviceToHost));
}
//printf("Q in arnoldi: \n");
//printMatrix(Q_h,m,m,m);
CUDA_CALL(hipFree(y_d));
CUDA_CALL(hipFree(q_d));
CUBLAS_CALL(hipblasDestroy(handle));
return EXIT_SUCCESS;
}
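// GMRES: host driver. It forms the initial residual r = b - A*x, normalizes it into
// the first Krylov basis vector, then iterates: arnoldi() extends the basis,
// apply_givens() keeps H upper-triangular, and err_h tracks the residual norm. On
// convergence (or maxIter) the small triangular system is solved with Rsolver and
// the update x = x + Q*y is applied.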
int GMRES(const cuFloatComplex *A_h, const cuFloatComplex *b_h, const int m,
const int maxIter, const float threshold, cuFloatComplex *x_h)
{
//printf("input x: \n");
//printMatrix(x_h,m,1,m);
int i, j, t;
float x, y;
hipblasHandle_t handle;
cuFloatComplex alpha, beta;
cuFloatComplex *Q_h = (cuFloatComplex*)malloc(m*m*sizeof(cuFloatComplex));
cuFloatComplex *H_h = (cuFloatComplex*)malloc(m*m*sizeof(cuFloatComplex));
cuFloatComplex *c = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *s = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *h = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *err_h = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *A_d, *x_d, *r_d, *q_d;
CUDA_CALL(hipMalloc(&A_d,m*m*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(A_d,A_h,m*m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&x_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(x_d,x_h,m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&r_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(r_d,b_h,m*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&q_d,m*sizeof(cuFloatComplex)));
float nrm_b, nrm_r;
CUBLAS_CALL(hipblasCreate(&handle));
CUBLAS_CALL(hipblasScnrm2_v2(handle,m,r_d,1,&nrm_b)); //norm of b vector
alpha = make_cuFloatComplex(-1,0);
beta = make_cuFloatComplex(1,0);
CUBLAS_CALL(hipblasCgemv_v2(handle,HIPBLAS_OP_N,m,m,&alpha,A_d,m,x_d,1,&beta,
r_d,1)); //r = b-Ax
CUDA_CALL(hipFree(A_d));
CUBLAS_CALL(hipblasScnrm2_v2(handle,m,r_d,1,&nrm_r)); //norm of r vector
err_h[0] = make_cuFloatComplex(nrm_r,0);
//printf("nrm_r=%f\n",nrm_r);
if(nrm_r/nrm_b<threshold) {
printf("The initial x is accurate enough.\n");
CUDA_CALL(hipFree(q_d));
CUDA_CALL(hipFree(r_d));
CUDA_CALL(hipFree(x_d));
free(err_h);
free(h);
free(s);
free(c);
free(H_h);
free(Q_h);
return EXIT_SUCCESS;
}
CUDA_CALL(hipMemset(q_d,0,m*sizeof(cuFloatComplex)));
alpha = make_cuFloatComplex(1.0/nrm_r,0);
CUBLAS_CALL(hipblasCaxpy_v2(handle,m,&alpha,r_d,1,q_d,1));
CUDA_CALL(hipMemcpy(&Q_h[IDXC0(0,0,m)],q_d,m*sizeof(cuFloatComplex),
hipMemcpyDeviceToHost));
i = 1;
while(1) {
arnoldi(A_h,i,m,Q_h,H_h);
//printf("H_h before givens: \n");
//printMatrix(H_h,m,i,m);
if(i<m) {
CUDA_CALL(hipMemcpy(h,&H_h[IDXC0(0,i-1,m)],
(i+1)*sizeof(cuFloatComplex),hipMemcpyHostToHost));
apply_givens(m,i,c,s,h);
CUDA_CALL(hipMemcpy(&H_h[IDXC0(0,i-1,m)],h,
(i+1)*sizeof(cuFloatComplex),hipMemcpyHostToHost));
alpha = cuConjf(s[i-1]);
x = cuCrealf(alpha);
y = cuCimagf(alpha);
beta = make_cuFloatComplex(-x,-y);
err_h[i] = cuCmulf(beta,err_h[i-1]);
err_h[i-1] = cuCmulf(c[i-1],err_h[i-1]);
} else {
CUDA_CALL(hipMemcpy(h,&H_h[IDXC0(0,i-1,m)],m*sizeof(cuFloatComplex),
hipMemcpyHostToHost));
apply_givens(m,i,c,s,h);
CUDA_CALL(hipMemcpy(&H_h[IDXC0(0,i-1,m)],h,m*sizeof(cuFloatComplex),
hipMemcpyHostToHost));
}
if((i<m && cuCabsf(err_h[i])/nrm_b<threshold) || i>=maxIter) {
break;
}
i++;
printf("Iteration: %d\n",i);
}
//printf("c: \n");
//printMatrix(c,m-1,1,m);
//printf("s: \n");
//printMatrix(s,m-1,1,m);
//printf("err_h: \n");
//printMatrix(err_h,m,1,m);
t = i;
cuFloatComplex *y_h = (cuFloatComplex*)malloc(t*sizeof(cuFloatComplex));
cuFloatComplex *R_h = (cuFloatComplex*)malloc(t*t*sizeof(cuFloatComplex));
for(i=0;i<t;i++) {
for(j=0;j<t;j++) {
R_h[IDXC0(i,j,t)] = H_h[IDXC0(i,j,m)];
}
}
//printf("H_h: \n");
//printMatrix(H_h,m,m,m);
//printf("R_h: \n");
//printMatrix(R_h,i,i,i);
Rsolver(R_h,err_h,t,y_h);
free(R_h);
//printf("y_h: \n");
//printMatrix(y_h,t,1,t);
cuFloatComplex *y_d, *Q_d;
CUDA_CALL(hipMalloc(&y_d,t*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(y_d,y_h,t*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&Q_d,m*t*sizeof(cuFloatComplex)));
CUDA_CALL(hipMemcpy(Q_d,Q_h,m*t*sizeof(cuFloatComplex),
hipMemcpyHostToDevice));
alpha = make_cuFloatComplex(1,0);
beta = make_cuFloatComplex(1,0);
CUBLAS_CALL(hipblasCgemv_v2(handle,HIPBLAS_OP_N,m,t,&alpha,Q_d,m,y_d,1,&beta,
x_d,1));
CUDA_CALL(hipMemcpy(x_h,x_d,m*sizeof(cuFloatComplex),
hipMemcpyDeviceToHost));
CUBLAS_CALL(hipblasDestroy(handle));
CUDA_CALL(hipFree(y_d));
CUDA_CALL(hipFree(Q_d));
CUDA_CALL(hipFree(q_d));
CUDA_CALL(hipFree(r_d));
CUDA_CALL(hipFree(x_d));
free(err_h);
free(h);
free(s);
free(c);
free(H_h);
free(Q_h);
return EXIT_SUCCESS;
}
| 11fbf88069b182b3f97b7d7bbc42c60b43e1f86e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <cuComplex.h>
#include "cublas_v2.h"
#include <cuda_runtime.h>
#include "GMRES.h"
extern "C" HOST void printMatrix(cuFloatComplex *A, const int row, const int col,
const int lda)
{
float x, y;
int i, j;
for (i=0;i<row;i++) {
for (j=0;j<col;j++) {
x = cuCrealf(A[IDXC0(i,j,lda)]);
y = cuCimagf(A[IDXC0(i,j,lda)]);
printf("(%f,%f) ",x,y);
}
printf("\n");
}
}
extern "C" HOST void Rsolver(const cuFloatComplex *R, const cuFloatComplex *b,
const int m, cuFloatComplex *x)
{
int i, j;
cuFloatComplex temp[6];
for(i=m-1;i>=0;i--) {
temp[0] = b[i];
temp[1] = R[IDXC0(i,i,m)];
for(j=m-1;j>i;j--) {
temp[2] = x[j];
temp[3] = R[IDXC0(i,j,m)];
temp[4] = cuCmulf(temp[2],temp[3]);
temp[0] = cuCsubf(temp[0],temp[4]);
}
x[i] = cuCdivf(temp[0],temp[1]);
}
}
extern "C" HOST void givens_coeffs(const cuFloatComplex rho,
const cuFloatComplex sigma, cuFloatComplex *c, cuFloatComplex *s)
{
cuFloatComplex rho_b = cuConjf(rho), sigma_b = cuConjf(sigma);
float x, y, mag;
mag = sqrt(pow(cuCabsf(rho),2)+pow(cuCabsf(sigma),2));
x = cuCrealf(rho_b)/mag;
y = cuCimagf(rho_b)/mag;
*c = make_cuFloatComplex(x,y);
x = cuCrealf(sigma_b)/mag;
y = cuCimagf(sigma_b)/mag;
*s = make_cuFloatComplex(x,y);
}
extern "C" HOST void apply_givens(const int m, const int k, cuFloatComplex *c,
cuFloatComplex *s, cuFloatComplex *h)
{
cuFloatComplex c_k, s_k, c_b, s_b, temp[7];
float x, y;
int i;
for(i=0;i<k-1;i++) {
temp[0] = cuCmulf(c[i],h[i]);
temp[1] = cuCmulf(s[i],h[i+1]);
temp[2] = cuCaddf(temp[0],temp[1]);
c_b = cuConjf(c[i]);
s_b = cuConjf(s[i]);
x = cuCrealf(s_b);
y = cuCimagf(s_b);
temp[3] = make_cuFloatComplex(-x,-y);
temp[4] = cuCmulf(temp[3],h[i]);
temp[5] = cuCmulf(c_b,h[i+1]);
temp[6] = cuCaddf(temp[4],temp[5]);
h[i+1] = temp[6];
h[i] = temp[2];
}
if(k<m) {
givens_coeffs(h[k-1],h[k],&c_k,&s_k);
c[k-1] = c_k;
s[k-1] = s_k;
temp[0] = cuCmulf(c_k,h[k-1]);
temp[1] = cuCmulf(s_k,h[k]);
h[k-1] = cuCaddf(temp[0],temp[1]);
h[k] = make_cuFloatComplex(0,0);
}
}
extern "C" HOST int arnoldi(const cuFloatComplex *A_h, const int k, const int m,
cuFloatComplex *Q_h, cuFloatComplex *H_h)
{
if(k>m) {
printf("Error with input k.\n");
return EXIT_FAILURE;
}
float v_real, v_imag;
int i;
float nrm;
cuFloatComplex alpha, beta, prod;
alpha = make_cuFloatComplex(1,0);
beta = make_cuFloatComplex(0,0);
cuFloatComplex *A_d, *q_d, *y_d;
cublasHandle_t handle;
CUBLAS_CALL(cublasCreate_v2(&handle));
CUDA_CALL(cudaMalloc(&A_d,m*m*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(A_d,A_h,m*m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&q_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(q_d,&Q_h[IDXC0(0,k-1,m)],m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&y_d,m*sizeof(cuFloatComplex)));
CUBLAS_CALL(cublasCgemv_v2(handle,CUBLAS_OP_N,m,m,&alpha,A_d,m,q_d,1,&beta,
y_d,1)); //Aq
CUDA_CALL(cudaFree(A_d));
for(i=1;i<=k;i++) {
CUDA_CALL(cudaMemcpy(q_d,&Q_h[IDXC0(0,i-1,m)],m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUBLAS_CALL(cublasCdotc_v2(handle,m,q_d,1,y_d,1,&prod));
H_h[IDXC0(i-1,k-1,m)] = prod;
v_real = cuCrealf(prod);
v_imag = cuCimagf(prod);
alpha = make_cuFloatComplex(-v_real,-v_imag);
CUBLAS_CALL(cublasCaxpy_v2(handle,m,&alpha,q_d,1,y_d,1));
}
CUBLAS_CALL(cublasScnrm2_v2(handle,m,y_d,1,&nrm));
if(k<m) {
H_h[IDXC0(k,k-1,m)] = make_cuFloatComplex(nrm,0);
alpha = make_cuFloatComplex(1.0/nrm,0);
CUDA_CALL(cudaMemset(q_d,0,m*sizeof(cuFloatComplex)));
CUBLAS_CALL(cublasCaxpy_v2(handle,m,&alpha,y_d,1,q_d,1));
CUDA_CALL(cudaMemcpy(&Q_h[IDXC0(0,k,m)],q_d,m*sizeof(cuFloatComplex),
cudaMemcpyDeviceToHost));
}
//printf("Q in arnoldi: \n");
//printMatrix(Q_h,m,m,m);
CUDA_CALL(cudaFree(y_d));
CUDA_CALL(cudaFree(q_d));
CUBLAS_CALL(cublasDestroy_v2(handle));
return EXIT_SUCCESS;
}
int GMRES(const cuFloatComplex *A_h, const cuFloatComplex *b_h, const int m,
const int maxIter, const float threshold, cuFloatComplex *x_h)
{
//printf("input x: \n");
//printMatrix(x_h,m,1,m);
int i, j, t;
float x, y;
cublasHandle_t handle;
cuFloatComplex alpha, beta;
cuFloatComplex *Q_h = (cuFloatComplex*)malloc(m*m*sizeof(cuFloatComplex));
cuFloatComplex *H_h = (cuFloatComplex*)malloc(m*m*sizeof(cuFloatComplex));
cuFloatComplex *c = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *s = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *h = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *err_h = (cuFloatComplex*)malloc(m*sizeof(cuFloatComplex));
cuFloatComplex *A_d, *x_d, *r_d, *q_d;
CUDA_CALL(cudaMalloc(&A_d,m*m*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(A_d,A_h,m*m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&x_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(x_d,x_h,m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&r_d,m*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(r_d,b_h,m*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&q_d,m*sizeof(cuFloatComplex)));
float nrm_b, nrm_r;
CUBLAS_CALL(cublasCreate_v2(&handle));
CUBLAS_CALL(cublasScnrm2_v2(handle,m,r_d,1,&nrm_b)); //norm of b vector
alpha = make_cuFloatComplex(-1,0);
beta = make_cuFloatComplex(1,0);
CUBLAS_CALL(cublasCgemv_v2(handle,CUBLAS_OP_N,m,m,&alpha,A_d,m,x_d,1,&beta,
r_d,1)); //r = b-Ax
CUDA_CALL(cudaFree(A_d));
CUBLAS_CALL(cublasScnrm2_v2(handle,m,r_d,1,&nrm_r)); //norm of r vector
err_h[0] = make_cuFloatComplex(nrm_r,0);
//printf("nrm_r=%f\n",nrm_r);
if(nrm_r/nrm_b<threshold) {
printf("The initial x is accurate enough.\n");
CUDA_CALL(cudaFree(q_d));
CUDA_CALL(cudaFree(r_d));
CUDA_CALL(cudaFree(x_d));
free(err_h);
free(h);
free(s);
free(c);
free(H_h);
free(Q_h);
return EXIT_SUCCESS;
}
CUDA_CALL(cudaMemset(q_d,0,m*sizeof(cuFloatComplex)));
alpha = make_cuFloatComplex(1.0/nrm_r,0);
CUBLAS_CALL(cublasCaxpy_v2(handle,m,&alpha,r_d,1,q_d,1));
CUDA_CALL(cudaMemcpy(&Q_h[IDXC0(0,0,m)],q_d,m*sizeof(cuFloatComplex),
cudaMemcpyDeviceToHost));
i = 1;
while(1) {
arnoldi(A_h,i,m,Q_h,H_h);
//printf("H_h before givens: \n");
//printMatrix(H_h,m,i,m);
if(i<m) {
CUDA_CALL(cudaMemcpy(h,&H_h[IDXC0(0,i-1,m)],
(i+1)*sizeof(cuFloatComplex),cudaMemcpyHostToHost));
apply_givens(m,i,c,s,h);
CUDA_CALL(cudaMemcpy(&H_h[IDXC0(0,i-1,m)],h,
(i+1)*sizeof(cuFloatComplex),cudaMemcpyHostToHost));
alpha = cuConjf(s[i-1]);
x = cuCrealf(alpha);
y = cuCimagf(alpha);
beta = make_cuFloatComplex(-x,-y);
err_h[i] = cuCmulf(beta,err_h[i-1]);
err_h[i-1] = cuCmulf(c[i-1],err_h[i-1]);
} else {
CUDA_CALL(cudaMemcpy(h,&H_h[IDXC0(0,i-1,m)],m*sizeof(cuFloatComplex),
cudaMemcpyHostToHost));
apply_givens(m,i,c,s,h);
CUDA_CALL(cudaMemcpy(&H_h[IDXC0(0,i-1,m)],h,m*sizeof(cuFloatComplex),
cudaMemcpyHostToHost));
}
if((i<m && cuCabsf(err_h[i])/nrm_b<threshold) || i>=maxIter) {
break;
}
i++;
printf("Iteration: %d\n",i);
}
//printf("c: \n");
//printMatrix(c,m-1,1,m);
//printf("s: \n");
//printMatrix(s,m-1,1,m);
//printf("err_h: \n");
//printMatrix(err_h,m,1,m);
t = i;
cuFloatComplex *y_h = (cuFloatComplex*)malloc(t*sizeof(cuFloatComplex));
cuFloatComplex *R_h = (cuFloatComplex*)malloc(t*t*sizeof(cuFloatComplex));
for(i=0;i<t;i++) {
for(j=0;j<t;j++) {
R_h[IDXC0(i,j,t)] = H_h[IDXC0(i,j,m)];
}
}
//printf("H_h: \n");
//printMatrix(H_h,m,m,m);
//printf("R_h: \n");
//printMatrix(R_h,i,i,i);
Rsolver(R_h,err_h,t,y_h);
free(R_h);
//printf("y_h: \n");
//printMatrix(y_h,t,1,t);
cuFloatComplex *y_d, *Q_d;
CUDA_CALL(cudaMalloc(&y_d,t*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(y_d,y_h,t*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&Q_d,m*t*sizeof(cuFloatComplex)));
CUDA_CALL(cudaMemcpy(Q_d,Q_h,m*t*sizeof(cuFloatComplex),
cudaMemcpyHostToDevice));
alpha = make_cuFloatComplex(1,0);
beta = make_cuFloatComplex(1,0);
CUBLAS_CALL(cublasCgemv_v2(handle,CUBLAS_OP_N,m,t,&alpha,Q_d,m,y_d,1,&beta,
x_d,1));
CUDA_CALL(cudaMemcpy(x_h,x_d,m*sizeof(cuFloatComplex),
cudaMemcpyDeviceToHost));
CUBLAS_CALL(cublasDestroy_v2(handle));
CUDA_CALL(cudaFree(y_d));
CUDA_CALL(cudaFree(Q_d));
CUDA_CALL(cudaFree(q_d));
CUDA_CALL(cudaFree(r_d));
CUDA_CALL(cudaFree(x_d));
free(err_h);
free(h);
free(s);
free(c);
free(H_h);
free(Q_h);
return EXIT_SUCCESS;
}
|
f7128c9a8237c4973ab50459c88e4f2075d2dd57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
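// euclid_dist_2: squared Euclidean distance between object tid and cluster clusterId.
// Attributes are stored attribute-major on the device (element [tid + i*numObjects]),
// while cluster centers are stored contiguously per cluster.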
__device__ inline float euclid_dist_2(int tid, int numObjects, int numAttributes, const float* __restrict__ attributes,
int clusterId, const float* __restrict__ clusters){
float ans=0.0;
for(int i = 0; i < numAttributes; i++){
float diff = attributes[tid + i*numObjects] - clusters[i + clusterId*numAttributes];
ans += diff*diff;
}
return ans;
}
__device__ inline int find_nearest_point(int tid, \
int numObjects,
int numAttributes,
const float* __restrict__ attributess,
const float* __restrict__ centers,
int ncenters)
{
int index = 0;
float min_dist = FLT_MAX;
for (int i = 0; i < ncenters; i++) {
float dist;
dist = euclid_dist_2(tid, numObjects, numAttributes, attributess, i, centers);
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
__global__ void findNewClusterIndex(int numObjects, int numAttributes, const float* __restrict__ attributes, \
int numClusters, const float* __restrict__ clusters, int* __restrict__ membership, \
int* __restrict__ new_centers_len, float* __restrict__ new_centers, float* __restrict__ delta){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < numObjects){
/* find the index of nestest cluster centers */
int index = find_nearest_point(tid, numObjects, numAttributes, attributes, clusters, numClusters);
/* if membership changes, increase delta by 1 */
if (membership[tid] != index) atomicAdd(delta, 1.0f);
/* assign the membership to object i */
membership[tid] = index;
/* update new cluster centers : sum of objects located within */
atomicAdd(new_centers_len+index, 1);
}
}
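// findNewClusterIndex: one thread per object. It assigns the object to the nearest
// center, counts membership changes into *delta with atomicAdd, and increments the
// chosen cluster's size in new_centers_len.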
__global__ void updateNewCluster(int numObjects, int numAttributes, const float* __restrict__ attributes, \
int numClusters, const float* __restrict__ clusters, int* __restrict__ membership, \
int* __restrict__ new_centers_len, float* __restrict__ new_centers, float* __restrict__ delta, float* __restrict__ s){
// extern __shared__ float s[]; //__shared__
int tid = blockDim.x*blockIdx.x + threadIdx.x;
// for(int i = threadIdx.x; i < numClusters*numAttributes; i += blockDim.x){
// s[i] = 0.0f;
// }
// __syncthreads();
if(tid < numObjects){
int index = membership[tid];
for(int j = 0; j < numAttributes; j++){
atomicAdd(new_centers+index*numAttributes+j, attributes[tid + numObjects*j]);
}
}
__syncthreads();
//for(int i = 0; i < numClusters; i++){
// for(int j = threadIdx.x; j < numAttributes; j += blockDim.x){
// atomicAdd(new_centers+i*numAttributes+j, s[j + numAttributes*i]);
// }
// }
}
__global__ void updateCenter(int numClusters, int numAttributes, float* __restrict__ clusters,\
int* __restrict__ new_centers_len, float* __restrict__ new_centers){
/* replace old cluster centers with new_centers */
for(int i = blockIdx.x; i < numClusters; i += gridDim.x) {
for(int j = threadIdx.x; j < numAttributes; j += blockDim.x) {
if (new_centers_len[i] > 0)
clusters[i*numAttributes + j] = new_centers[i*numAttributes + j] / new_centers_len[i];
new_centers[i*numAttributes + j] = 0.0; /* set back to 0 */
}
new_centers_len[i] = 0; /* set back to 0 */
}
}
/*----< kmeans_clustering() >---------------------------------------------*/
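// Host driver: copies objects, memberships and initial centers to the device, then
// repeats assignment (findNewClusterIndex), accumulation (updateNewCluster) and
// center recomputation (updateCenter) until the number of membership changes per
// pass (delta) is no longer above the threshold.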
void kmeans_clustering(int numObjects,
int numAttributes,
float *attributes, /* in: [numObjects][numAttributes] */
int *membership,
int numClusters,
float* clusters,
float threshold){
int *d_new_centers_len; /* [numClusters]: no. of points in each cluster */
hipMalloc((void**)&d_new_centers_len, numClusters*sizeof(int));
float *d_new_centers; /* [numClusters][numAttributes] */
hipMalloc((void**)&d_new_centers, numClusters*numAttributes*sizeof(int));
float *d_delta;
hipMalloc((void**)&d_delta, sizeof(float));
float *d_attributes;
hipMalloc((void**)&d_attributes, numObjects*numAttributes*sizeof(float));
hipMemcpy(d_attributes, attributes, numObjects*numAttributes*sizeof(float), hipMemcpyDefault);
int *d_membership;
hipMalloc((void**)&d_membership, numObjects*sizeof(int));
hipMemcpy(d_membership, membership, numObjects*sizeof(int), hipMemcpyDefault);
float *d_clusters;
hipMalloc((void**)&d_clusters, numClusters*numAttributes*sizeof(float));
hipMemcpy(d_clusters, clusters, numClusters*numAttributes*sizeof(float), hipMemcpyDefault);
float delta = 0.0;
float *s;
s = (float *)calloc(numClusters*numAttributes,sizeof(float));
float *d_s;
hipMalloc((void**)&d_s, numClusters*numAttributes*sizeof(float));
do {
hipMemset(d_new_centers_len, 0, numClusters*sizeof(int));
hipMemset(d_new_centers, 0, numClusters*numAttributes*sizeof(int));
hipMemset(d_delta, 0, sizeof(int));
hipMemset(d_s, 0, numClusters*numAttributes*sizeof(float));
int blockSize = 256;
int gridSize = (numObjects+blockSize-1)/blockSize;
hipLaunchKernelGGL(( findNewClusterIndex), dim3(gridSize), dim3(blockSize), 0, 0, numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta);
// updateNewCluster<<<gridSize, blockSize, numClusters*numAttributes*sizeof(float)>>>(numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta);
hipLaunchKernelGGL(( updateNewCluster), dim3(gridSize), dim3(blockSize), 0, 0, numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta, d_s);
hipMemcpy(s, d_s, numClusters*numAttributes*sizeof(float), hipMemcpyDefault);
// for (int i=0; i<numClusters*numAttributes; i++) {
// printf("%f ",s[i]);
// }
// printf("\n\n");
hipLaunchKernelGGL(( updateCenter), dim3(numClusters), dim3(blockSize), 0, 0, numClusters, numAttributes, d_clusters, d_new_centers_len, d_new_centers);
//delta /= numObjects;
hipMemcpy(&delta, d_delta, sizeof(float), hipMemcpyDefault);
printf("%.3f %.3f\n", delta, threshold);
} while (delta > threshold);
hipMemcpy(clusters, d_clusters, numClusters*numAttributes*sizeof(float), hipMemcpyDefault);
hipMemcpy(membership,d_membership,numObjects*sizeof(int),hipMemcpyDefault);
hipFree(d_new_centers_len);
hipFree(d_new_centers);
hipFree(d_delta);
hipFree(d_attributes);
hipFree(d_membership);
hipFree(d_clusters);
hipFree(d_s);
free(s);
}
int main(int argc, char **argv) {
FILE * fin=fopen("oData.txt","r");
FILE * fout=fopen("output.txt","w");
int nums = 4000000,dim = 128,k = 96;
int thold = 1000;
sscanf(argv[1],"%d",&k);
sscanf(argv[2],"%d",&nums);
printf("k=%d\n",k);
printf("n=%d\n",nums);
int *membership,*countM;
float *data,*cluster,tmp;
srand((unsigned)time(NULL));
membership = (int *)calloc(nums,sizeof(int));
data = (float *)calloc(nums*dim,sizeof(float));
cluster = (float *)calloc(k*dim,sizeof(float));
countM = (int *)calloc(k,sizeof(int));
for (int i=0; i<nums; i++)
for (int j=0; j<dim; j++) {
fscanf(fin,"%f",&tmp);
data[i+j*nums] = tmp;
}
//for (int i=0; i<nums*dim; i++) printf("%f ",data[i]);
for (int i=0; i<nums; i++) {
membership[i] = 0;
}
int target;
for (int i=0; i<k; i++)
for (int j=0; j<dim; j++) {
//cluster[i*dim+j] = rand()%maxd[j];
target = rand()%nums;
cluster[i*dim+j]=data[target+j*nums];
}
kmeans_clustering(nums,dim,data,membership,k,cluster,thold);
for (int i=0; i<k*dim; i++) {
if (i%dim == 0) fprintf(fout,"\n\n");
fprintf(fout,"%f ",cluster[i]);
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
memset(countM,0,k*sizeof(int));
for (int i=0; i<nums; i++) {
fprintf(fout,"%d ",membership[i]);
countM[membership[i]]++;
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
for (int i=0; i<k; i++) {
fprintf(fout,"%d ",countM[i]);
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
free(countM);
free(membership);
free(data);
free(cluster);
fclose(fin);
fclose(fout);
return 0;
}
| f7128c9a8237c4973ab50459c88e4f2075d2dd57.cu | #include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#ifndef FLT_MAX
#define FLT_MAX 3.40282347e+38
#endif
__device__ inline float euclid_dist_2(int tid, int numObjects, int numAttributes, const float* __restrict__ attributes,
int clusterId, const float* __restrict__ clusters){
float ans=0.0;
for(int i = 0; i < numAttributes; i++){
float diff = attributes[tid + i*numObjects] - clusters[i + clusterId*numAttributes];
ans += diff*diff;
}
return ans;
}
__device__ inline int find_nearest_point(int tid, \
int numObjects,
int numAttributes,
const float* __restrict__ attributess,
const float* __restrict__ centers,
int ncenters)
{
int index = 0;
float min_dist = FLT_MAX;
for (int i = 0; i < ncenters; i++) {
float dist;
dist = euclid_dist_2(tid, numObjects, numAttributes, attributess, i, centers);
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
__global__ void findNewClusterIndex(int numObjects, int numAttributes, const float* __restrict__ attributes, \
int numClusters, const float* __restrict__ clusters, int* __restrict__ membership, \
int* __restrict__ new_centers_len, float* __restrict__ new_centers, float* __restrict__ delta){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < numObjects){
/* find the index of nestest cluster centers */
int index = find_nearest_point(tid, numObjects, numAttributes, attributes, clusters, numClusters);
/* if membership changes, increase delta by 1 */
if (membership[tid] != index) atomicAdd(delta, 1.0f);
/* assign the membership to object i */
membership[tid] = index;
/* update new cluster centers : sum of objects located within */
atomicAdd(new_centers_len+index, 1);
}
}
__global__ void updateNewCluster(int numObjects, int numAttributes, const float* __restrict__ attributes, \
int numClusters, const float* __restrict__ clusters, int* __restrict__ membership, \
int* __restrict__ new_centers_len, float* __restrict__ new_centers, float* __restrict__ delta, float* __restrict__ s){
// extern __shared__ float s[]; //__shared__
int tid = blockDim.x*blockIdx.x + threadIdx.x;
// for(int i = threadIdx.x; i < numClusters*numAttributes; i += blockDim.x){
// s[i] = 0.0f;
// }
// __syncthreads();
if(tid < numObjects){
int index = membership[tid];
for(int j = 0; j < numAttributes; j++){
atomicAdd(new_centers+index*numAttributes+j, attributes[tid + numObjects*j]);
}
}
__syncthreads();
//for(int i = 0; i < numClusters; i++){
// for(int j = threadIdx.x; j < numAttributes; j += blockDim.x){
// atomicAdd(new_centers+i*numAttributes+j, s[j + numAttributes*i]);
// }
// }
}
__global__ void updateCenter(int numClusters, int numAttributes, float* __restrict__ clusters,\
int* __restrict__ new_centers_len, float* __restrict__ new_centers){
/* replace old cluster centers with new_centers */
for(int i = blockIdx.x; i < numClusters; i += gridDim.x) {
for(int j = threadIdx.x; j < numAttributes; j += blockDim.x) {
if (new_centers_len[i] > 0)
clusters[i*numAttributes + j] = new_centers[i*numAttributes + j] / new_centers_len[i];
new_centers[i*numAttributes + j] = 0.0; /* set back to 0 */
}
new_centers_len[i] = 0; /* set back to 0 */
}
}
/*----< kmeans_clustering() >---------------------------------------------*/
void kmeans_clustering(int numObjects,
int numAttributes,
float *attributes, /* in: [numObjects][numAttributes] */
int *membership,
int numClusters,
float* clusters,
float threshold){
int *d_new_centers_len; /* [numClusters]: no. of points in each cluster */
cudaMalloc((void**)&d_new_centers_len, numClusters*sizeof(int));
float *d_new_centers; /* [numClusters][numAttributes] */
cudaMalloc((void**)&d_new_centers, numClusters*numAttributes*sizeof(int));
float *d_delta;
cudaMalloc((void**)&d_delta, sizeof(float));
float *d_attributes;
cudaMalloc((void**)&d_attributes, numObjects*numAttributes*sizeof(float));
cudaMemcpy(d_attributes, attributes, numObjects*numAttributes*sizeof(float), cudaMemcpyDefault);
int *d_membership;
cudaMalloc((void**)&d_membership, numObjects*sizeof(int));
cudaMemcpy(d_membership, membership, numObjects*sizeof(int), cudaMemcpyDefault);
float *d_clusters;
cudaMalloc((void**)&d_clusters, numClusters*numAttributes*sizeof(float));
cudaMemcpy(d_clusters, clusters, numClusters*numAttributes*sizeof(float), cudaMemcpyDefault);
float delta = 0.0;
float *s;
s = (float *)calloc(numClusters*numAttributes,sizeof(float));
float *d_s;
cudaMalloc((void**)&d_s, numClusters*numAttributes*sizeof(float));
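// Main k-means loop: reassign every object to its nearest center, accumulate the per-cluster sums,
// recompute the centers, and repeat while more than 'threshold' objects changed cluster in the pass.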
do {
cudaMemset(d_new_centers_len, 0, numClusters*sizeof(int));
cudaMemset(d_new_centers, 0, numClusters*numAttributes*sizeof(float));
cudaMemset(d_delta, 0, sizeof(float));
cudaMemset(d_s, 0, numClusters*numAttributes*sizeof(float));
int blockSize = 256;
int gridSize = (numObjects+blockSize-1)/blockSize;
findNewClusterIndex<<<gridSize, blockSize>>>(numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta);
// updateNewCluster<<<gridSize, blockSize, numClusters*numAttributes*sizeof(float)>>>(numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta);
updateNewCluster<<<gridSize, blockSize>>>(numObjects, numAttributes, d_attributes, numClusters, d_clusters, d_membership, d_new_centers_len, d_new_centers, d_delta, d_s);
cudaMemcpy(s, d_s, numClusters*numAttributes*sizeof(float), cudaMemcpyDefault);
// for (int i=0; i<numClusters*numAttributes; i++) {
// printf("%f ",s[i]);
// }
// printf("\n\n");
updateCenter<<<numClusters, blockSize>>>(numClusters, numAttributes, d_clusters, d_new_centers_len, d_new_centers);
//delta /= numObjects;
cudaMemcpy(&delta, d_delta, sizeof(float), cudaMemcpyDefault);
printf("%.3f %.3f\n", delta, threshold);
} while (delta > threshold);
cudaMemcpy(clusters, d_clusters, numClusters*numAttributes*sizeof(float), cudaMemcpyDefault);
cudaMemcpy(membership,d_membership,numObjects*sizeof(int),cudaMemcpyDefault);
cudaFree(d_new_centers_len);
cudaFree(d_new_centers);
cudaFree(d_delta);
cudaFree(d_attributes);
cudaFree(d_membership);
cudaFree(d_clusters);
cudaFree(d_s);
free(s);
}
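/* Driver: reads nums x dim values from oData.txt (stored attribute-major, i.e. data[i + j*nums]),
initialises the k centers from randomly sampled objects, runs kmeans_clustering, and writes the final
centers, per-object memberships and per-cluster counts to output.txt. */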
int main(int argc, char **argv) {
FILE * fin=fopen("oData.txt","r");
FILE * fout=fopen("output.txt","w");
int nums = 4000000,dim = 128,k = 96;
int thold = 1000;
sscanf(argv[1],"%d",&k);
sscanf(argv[2],"%d",&nums);
printf("k=%d\n",k);
printf("n=%d\n",nums);
int *membership,*countM;
float *data,*cluster,tmp;
srand((unsigned)time(NULL));
membership = (int *)calloc(nums,sizeof(int));
data = (float *)calloc(nums*dim,sizeof(float));
cluster = (float *)calloc(k*dim,sizeof(float));
countM = (int *)calloc(k,sizeof(int));
for (int i=0; i<nums; i++)
for (int j=0; j<dim; j++) {
fscanf(fin,"%f",&tmp);
data[i+j*nums] = tmp;
}
//for (int i=0; i<nums*dim; i++) printf("%f ",data[i]);
for (int i=0; i<nums; i++) {
membership[i] = 0;
}
int target;
for (int i=0; i<k; i++)
for (int j=0; j<dim; j++) {
//cluster[i*dim+j] = rand()%maxd[j];
target = rand()%nums;
cluster[i*dim+j]=data[target+j*nums];
}
kmeans_clustering(nums,dim,data,membership,k,cluster,thold);
for (int i=0; i<k*dim; i++) {
if (i%dim == 0) fprintf(fout,"\n\n");
fprintf(fout,"%f ",cluster[i]);
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
memset(countM, 0, k*sizeof(int));
for (int i=0; i<nums; i++) {
fprintf(fout,"%d ",membership[i]);
countM[membership[i]]++;
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
for (int i=0; i<k; i++) {
fprintf(fout,"%d ",countM[i]);
}
fprintf(fout,"\n\n-----------------------------------------\n\n");
free(countM);
free(membership);
free(data);
free(cluster);
fclose(fin);
fclose(fout);
return 0;
}
|
c8ca4fb27c8078d51609545cbff0a2a398122e5c.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
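// Each wrapper below dispatches on the iterator's dtype (floating point, plus complex/half/bfloat16
// where listed) and applies the corresponding elementwise device function through gpu_kernel.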
void acos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "acos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
void asin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "asin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
void atan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "atan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
void sin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sin(a);
});
});
}
void cos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "cos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cos(a);
});
});
}
void sinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
void cosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "cosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
void tanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "tanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
void acosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
void asinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
void atanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
void tan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "tan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tan(a);
});
});
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
| c8ca4fb27c8078d51609545cbff0a2a398122e5c.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
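// Each wrapper below dispatches on the iterator's dtype (floating point, plus complex/half/bfloat16
// where listed) and applies the corresponding elementwise device function through gpu_kernel.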
void acos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "acos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
void asin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "asin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
void atan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "atan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
void sin_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sin_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sin(a);
});
});
}
void cos_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "cos_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cos(a);
});
});
}
void sinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "sinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
void cosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "cosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
void tanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "tanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
void acosh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "acosh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
void asinh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "asinh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
void atanh_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "atanh_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
void tan_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "tan_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tan(a);
});
});
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
|
227d579ba866d9203b40ea7ce66ae3a5affe36bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string>
#include <omp.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "timer.h"
using namespace std;
#define USE_OMP
#if defined(_DEBUG)
#define GIGA (1 << 20)
#else
#define GIGA (1 << 30)
#endif
#define BMSIZE (GIGA / 8)
#define MAX_PATTERN_LENGTH 256
__constant__ char dev_pattern[MAX_PATTERN_LENGTH];
__constant__ int dev_pattern_size;
__device__ char * dev_buffer = nullptr;
__device__ unsigned char * dev_bitmap = nullptr;
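// One thread per starting offset: compare the buffer (case-folded to lower case on the fly) against the
// constant-memory pattern and, on a full match, set the corresponding bit in the result bitmap.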
__global__ void SearchGPU_V1(char * buffer, int buffer_size, unsigned char * bitmap, int bitmap_size)
{
//this index is the starting offset in the buffer that this thread tests
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int pIndex;
//the pattern index
for (pIndex = 0; pIndex < dev_pattern_size; pIndex++)
{
char tmp = *(buffer + index + pIndex);
if (tmp < 65 || (tmp > 90 && tmp < 97)) {
break;
}
else if (tmp > 64 && tmp < 91) {
tmp += 32;
}
if (tmp != *(dev_pattern + pIndex))
break;
}
//if the loop ran the full pattern length, every character matched
if (pIndex == dev_pattern_size)
{
int byte_number = index >> 3;
if (byte_number < bitmap_size)
{
int bit_number = index % 8;
//need atomicity
//CUDA Atomic functions
{
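// NOTE: the |= below is a plain read-modify-write, not an atomic update; two threads matching
// in the same byte can race. An atomicOr on a 32-bit-aligned view of the bitmap would make it safe.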
*(bitmap + byte_number) |= (1 << bit_number);
}
}
}
}
int SearchCPU_V1(char * buffer, int buffer_size, char * pattern, int pattern_size, unsigned char * bitmap, int bitmap_size)
{
int rv = 0;
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for (int cIndex = 0; cIndex < buffer_size; cIndex++)
{
int pIndex;
for (pIndex = 0; pIndex < pattern_size; pIndex++)
{
if (tolower(*(buffer + cIndex + pIndex)) != *(pattern + pIndex))
break;
}
if (pIndex == pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
#if defined(USE_OMP)
#pragma omp critical
#endif
{
*(bitmap + byte_number) |= (1 << bit_number);
rv++;
}
}
}
}
return rv;
}
/* CStringToLower() - this function flattens a c string to all
lower case. It marches through memory until a null byte is
found. As such, some may consider this function unsafe.
By flattening the pattern, we can eliminate a tolower in
the search function - a potentially big win.
The original pointer is returned so that the function can be
used in an assignment statement.
*/
char * CStringToLower(char * s)
{
char * rv = s;
for (; *s != '\0'; s++)
{
*s = tolower(*s);
}
return rv;
}
inline void CheckCudaAndThrow(hipError_t t, const string & message)
{
if (t != hipSuccess)
throw message;
}
int main(int argc, char * argv[])
{
cout.imbue(locale(""));
ifstream f("C:/Users/educ/Documents/enwiki-latest-abstract.xml");
hptimer hpt;
char * hst_buffer = nullptr;
unsigned char * hst_bm = nullptr;
unsigned char * chk_bm = nullptr;
#if defined(USE_OMP)
cout << "OMP enabled on " << omp_get_max_threads() << " threads." << endl;
#endif
try
{
if (argc < 2)
throw string("First argument must be target string.");
char * pattern = CStringToLower(argv[1]);
int pattern_size = strlen(pattern);
if (!f.is_open())
throw string("File failed to open");
hst_buffer = new char[GIGA];
hst_bm = new unsigned char[BMSIZE]();
chk_bm = new unsigned char[BMSIZE];
hpt.TimeSinceLastCall();
f.read(hst_buffer, GIGA);
if (!f)
throw string("Failed to read full buffer.");
double read_time = hpt.TimeSinceLastCall();
cout << GIGA << " bytes read from disk in " << read_time << " seconds at " << GIGA / read_time / double(1 << 30) << " GB / second." << endl;
CheckCudaAndThrow(hipSetDevice(0), string("hipSetDevice(0) failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMalloc(&dev_buffer, GIGA), string("hipMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMalloc(&dev_bitmap, BMSIZE), string("hipMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemset(dev_bitmap, 0, BMSIZE), string("hipMemset failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemcpyToSymbol(dev_pattern, pattern, pattern_size, 0), string("hipMemcpyToSymbol failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemcpyToSymbol(dev_pattern_size, &pattern_size, sizeof(int), 0), string("hipMemcpyToSymbol failed on line ") + to_string(__LINE__));
hpt.TimeSinceLastCall();
CheckCudaAndThrow(hipMemcpy(dev_buffer, hst_buffer, GIGA, hipMemcpyHostToDevice), string("hipMemcpy failed on line ") + to_string(__LINE__));
double copy_time = hpt.TimeSinceLastCall();
cout << GIGA << " data bytes copied to GPU in " << copy_time << " seconds at " << GIGA / copy_time / double(1 << 30) << " GB / second." << endl;
hpt.TimeSinceLastCall();
int matches_found = SearchCPU_V1(hst_buffer, GIGA, pattern, pattern_size, hst_bm, BMSIZE);
double time_cpu = hpt.TimeSinceLastCall();
cout << "SearchCPU_V1 found " << matches_found << " matches in " << time_cpu << " seconds.";
cout << " Searched " << GIGA / time_cpu / double(1 << 30) << " GB / second." << endl;
int threads_per_block = 1024;
dim3 grid(1024, 1024);
hpt.TimeSinceLastCall();
SearchGPU_V1 << <grid, threads_per_block >> >(dev_buffer, GIGA, dev_bitmap, BMSIZE);
CheckCudaAndThrow(hipGetLastError(), string("kernel launch failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipDeviceSynchronize(), string("hipDeviceSynchronize() failed on line ") + to_string(__LINE__));
double time_gpu = hpt.TimeSinceLastCall();
CheckCudaAndThrow(hipMemcpy(chk_bm, dev_bitmap, BMSIZE, hipMemcpyDeviceToHost), string("hipMemcpy() failed on line ") + to_string(__LINE__));
unsigned int * bm_alias = (unsigned int *)chk_bm;
int match_count = 0;
for (int i = 0; i < BMSIZE / sizeof(int); i++)
{
unsigned int c = 0;
unsigned int v = *(bm_alias + i);
for (c = 0; v; c++)
{
v &= v - 1;
}
match_count += c;
}
cout << "SearchGPU_V1 found " << match_count << " matches in " << time_gpu << " seconds.";
cout << " Searched " << GIGA / time_gpu / double(1 << 30) << " GB / second." << endl;
cout << endl;
cout << "Ratio: " << time_cpu / time_gpu << " to 1" << endl;
}
catch (string s)
{
cout << s << endl;
}
if (dev_buffer != nullptr)
hipFree(dev_buffer);
if (dev_bitmap != nullptr)
hipFree(dev_bitmap);
if (hst_buffer != nullptr)
delete[] hst_buffer;
if (hst_bm != nullptr)
delete[] hst_bm;
if (f.is_open())
f.close();
hipDeviceReset();
#if defined(WIN64) || defined(WIN32)
cout << endl;
system("pause");
#endif
return 0;
} | 227d579ba866d9203b40ea7ce66ae3a5affe36bc.cu |
#include <iostream>
#include <fstream>
#include <string>
#include <omp.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "timer.h"
using namespace std;
#define USE_OMP
#if defined(_DEBUG)
#define GIGA (1 << 20)
#else
#define GIGA (1 << 30)
#endif
#define BMSIZE (GIGA / 8)
#define MAX_PATTERN_LENGTH 256
__constant__ char dev_pattern[MAX_PATTERN_LENGTH];
__constant__ int dev_pattern_size;
__device__ char * dev_buffer = nullptr;
__device__ unsigned char * dev_bitmap = nullptr;
__global__ void SearchGPU_V1(char * buffer, int buffer_size, unsigned char * bitmap, int bitmap_size)
{
//this index is the starting offset in the buffer that this thread tests
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int pIndex;
//the pattern index
for (pIndex = 0; pIndex < dev_pattern_size; pIndex++)
{
char tmp = *(buffer + index + pIndex);
if (tmp < 65 || (tmp > 90 && tmp < 97)) {
break;
}
else if (tmp > 64 && tmp < 91) {
tmp += 32;
}
if (tmp != *(dev_pattern + pIndex))
break;
}
//if the loop ran the full pattern length, every character matched
if (pIndex == dev_pattern_size)
{
int byte_number = index >> 3;
if (byte_number < bitmap_size)
{
int bit_number = index % 8;
//need atomicity
//CUDA Atomic functions
{
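// NOTE: the |= below is a plain read-modify-write, not an atomic update; two threads matching
// in the same byte can race. An atomicOr on a 32-bit-aligned view of the bitmap would make it safe.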
*(bitmap + byte_number) |= (1 << bit_number);
}
}
}
}
int SearchCPU_V1(char * buffer, int buffer_size, char * pattern, int pattern_size, unsigned char * bitmap, int bitmap_size)
{
int rv = 0;
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for (int cIndex = 0; cIndex < buffer_size; cIndex++)
{
int pIndex;
for (pIndex = 0; pIndex < pattern_size; pIndex++)
{
if (tolower(*(buffer + cIndex + pIndex)) != *(pattern + pIndex))
break;
}
if (pIndex == pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
#if defined(USE_OMP)
#pragma omp critical
#endif
{
*(bitmap + byte_number) |= (1 << bit_number);
rv++;
}
}
}
}
return rv;
}
/* CStringToLower() - this function flattens a c string to all
lower case. It marches through memory until a null byte is
found. As such, some may consider this function unsafe.
By flattening the pattern, we can eliminate a tolower in
the search function - a potentially big win.
The original pointer is returned so that the function can be
used in an assignment statement.
*/
char * CStringToLower(char * s)
{
char * rv = s;
for (; *s != '\0'; s++)
{
*s = tolower(*s);
}
return rv;
}
inline void CheckCudaAndThrow(cudaError_t t, const string & message)
{
if (t != cudaSuccess)
throw message;
}
int main(int argc, char * argv[])
{
cout.imbue(locale(""));
ifstream f("C:/Users/educ/Documents/enwiki-latest-abstract.xml");
hptimer hpt;
char * hst_buffer = nullptr;
unsigned char * hst_bm = nullptr;
unsigned char * chk_bm = nullptr;
#if defined(USE_OMP)
cout << "OMP enabled on " << omp_get_max_threads() << " threads." << endl;
#endif
try
{
if (argc < 2)
throw string("First argument must be target string.");
char * pattern = CStringToLower(argv[1]);
int pattern_size = strlen(pattern);
if (!f.is_open())
throw string("File failed to open");
hst_buffer = new char[GIGA];
hst_bm = new unsigned char[BMSIZE]();
chk_bm = new unsigned char[BMSIZE];
hpt.TimeSinceLastCall();
f.read(hst_buffer, GIGA);
if (!f)
throw string("Failed to read full buffer.");
double read_time = hpt.TimeSinceLastCall();
cout << GIGA << " bytes read from disk in " << read_time << " seconds at " << GIGA / read_time / double(1 << 30) << " GB / second." << endl;
CheckCudaAndThrow(cudaSetDevice(0), string("cudaSetDevice(0) failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMalloc(&dev_buffer, GIGA), string("cudaMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMalloc(&dev_bitmap, BMSIZE), string("cudaMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemset(dev_bitmap, 0, BMSIZE), string("cudaMemset failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemcpyToSymbol(dev_pattern, pattern, pattern_size, 0), string("cudaMemcpyToSymbol failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemcpyToSymbol(dev_pattern_size, &pattern_size, sizeof(int), 0), string("cudaMemcpyToSymbol failed on line ") + to_string(__LINE__));
hpt.TimeSinceLastCall();
CheckCudaAndThrow(cudaMemcpy(dev_buffer, hst_buffer, GIGA, cudaMemcpyHostToDevice), string("cudaMemcpy failed on line ") + to_string(__LINE__));
double copy_time = hpt.TimeSinceLastCall();
cout << GIGA << " data bytes copied to GPU in " << copy_time << " seconds at " << GIGA / copy_time / double(1 << 30) << " GB / second." << endl;
hpt.TimeSinceLastCall();
int matches_found = SearchCPU_V1(hst_buffer, GIGA, pattern, pattern_size, hst_bm, BMSIZE);
double time_cpu = hpt.TimeSinceLastCall();
cout << "SearchCPU_V1 found " << matches_found << " matches in " << time_cpu << " seconds.";
cout << " Searched " << GIGA / time_cpu / double(1 << 30) << " GB / second." << endl;
int threads_per_block = 1024;
dim3 grid(1024, 1024);
hpt.TimeSinceLastCall();
SearchGPU_V1 << <grid, threads_per_block >> >(dev_buffer, GIGA, dev_bitmap, BMSIZE);
CheckCudaAndThrow(cudaGetLastError(), string("kernel launch failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaDeviceSynchronize(), string("cudaDeviceSynchronize() failed on line ") + to_string(__LINE__));
double time_gpu = hpt.TimeSinceLastCall();
CheckCudaAndThrow(cudaMemcpy(chk_bm, dev_bitmap, BMSIZE, cudaMemcpyDeviceToHost), string("cudaMemcpy() failed on line ") + to_string(__LINE__));
unsigned int * bm_alias = (unsigned int *)chk_bm;
int match_count = 0;
for (int i = 0; i < BMSIZE / sizeof(int); i++)
{
unsigned int c = 0;
unsigned int v = *(bm_alias + i);
for (c = 0; v; c++)
{
v &= v - 1;
}
match_count += c;
}
cout << "SearchGPU_V1 found " << match_count << " matches in " << time_gpu << " seconds.";
cout << " Searched " << GIGA / time_gpu / double(1 << 30) << " GB / second." << endl;
cout << endl;
cout << "Ratio: " << time_cpu / time_gpu << " to 1" << endl;
}
catch (string s)
{
cout << s << endl;
}
if (dev_buffer != nullptr)
cudaFree(dev_buffer);
if (dev_bitmap != nullptr)
cudaFree(dev_bitmap);
if (hst_buffer != nullptr)
delete[] hst_buffer;
if (hst_bm != nullptr)
delete[] hst_bm;
if (f.is_open())
f.close();
cudaDeviceReset();
#if defined(WIN64) || defined(WIN32)
cout << endl;
system("pause");
#endif
return 0;
} |
dcbcc2767fc27109848bb68ef63245e1f98286a7.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc prng21.cu -I/home/couturie/TestU01-inst/include -I/home/couturie/NVIDIA_GPU_Computing_SDK/CUDALibraries/common/inc/ -o prng21 -ltestu01 -lprobdist -lmylib -lm -L/usr/local/cuda/lib64 -lcuda -lcudart
//nvcc perf_opti_bbs2.cu -I ~/TestU01-inst2/include -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -o perf_opti_bbs2 -ltestu01 -lprobdist -lmylib -lm -L/usr/local/cuda/lib64 -lcuda -lcudart -L/$HOME/NVIDIA_GPU_Computing_SDK/C/lib -lcutil -arch=sm_13 -O3
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
/*
extern "C" {
#include "unif01.h"
#include "bbattery.h"
}
*/
//extern "C" {
// int load_RGB_pixmap(char *filename, int *width, int *height, unsigned char**R_data, unsigned char**G_data, unsigned char**B_data);
//void store_RGB_pixmap(char *filename, unsigned char *R_data, unsigned char *G_data, unsigned char *B_data, int width, int height);
//}
typedef unsigned char uchar;
int nb=64;//512*2;
uint size=512;
//const uint nb_ele=8192*8192/4*3;//1024*1024*2;
//const uint nb_ele=9000*9000/4*3;//1024*1024*2;
uint nb_ele;
const uint ssize=512;
uint blocks;
ulong nb_numbers=0;
typedef unsigned int uint;
typedef unsigned long ulong;
typedef unsigned short ushort;
__device__ inline ulong rotl(const ulong x, int k) {
return (x << k) | (x >> (64 - k));
}
__device__ inline
ulong xoroshiro128plus(ulong2* rng) {
const ulong s0 = rng->x;
ulong s1 = rng->y;
const ulong result = rng->x + rng->y;
s1 ^= s0;
rng->x = rotl(s0, 24) ^ s1 ^ (s1 << 16); // a, b
rng->y = rotl(s1, 37); // c
return result;
}
__device__ inline
ulong xorshift64(ulong t)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
ulong x = t;
x ^= x >> 12; // a
x ^= x << 25; // b
x ^= x >> 27; // c
return x;
}
ulong xor128(ulong t) {
static uint x = 123456789,
y = 362436069,
z = 521288629,
w = 88675123;
t = x ^ (x << 11);
x = y; y = z; z = w;
w = w ^ (w >> 19) ^ (t ^ (t >> 8));
//printf("%u %u %u %u %u\n",x,y,z,w,t);
return w;
}
ulong *d_random;
uchar *d_Pbox;
uchar *d_Sbox;
ulong2 *d_val;
uint *h_v;
uint *h_x;
uint *h_random;
ulong2 *h_val;
//typedef struct { ulong state; ulong inc; } pcg32_random_t;
__device__ inline
uint pcg32_random_r(ulong2* rng)
{
// pcg32_random_t *rng=(pcg32_random_t*)rng2;
ulong oldstate = rng->x;
// Advance internal state
rng->x = oldstate * 6364136223846793005ULL + (rng->y|1);
// Calculate output function (XSH RR), uses old state for max ILP
uint xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
uint rot = oldstate >> 59u;
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
}
__device__ __host__ inline
uint xorshift32(const uint t)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
uint x = t;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
return x;
}
__device__
ulong xor128_device(ulong4 *d) {
ulong t = d->x ^ (d->x << 11);
d->x = d->y; d->y = d->z; d->z = d->w;
d->w = d->w ^ (d->w >> 19) ^ (t ^ (t >> 8));
return d->w;
}
__device__
ulong xorshift_device(ulong *v, ulong4 *d) {
ulong t = d->x^(d->x>>7);
d->x=d->y; d->y=d->z; d->z=d->w; d->w=*v;
*v=(*v^(*v<<6))^(t^(t<<13));
return (d->y+d->y+1)*(*v);
}
__device__
unsigned long xorwow_device(ulong2 *v, ulong4 *d){
ulong t=(d->x^(d->x>>2));
d->x=d->y; d->y=d->z; d->z=d->w; d->w=v->x;
v->x=(v->x^(v->x<<4))^(t^(t<<1));
return (v->y+=362437)+v->x;
}
__device__ __host__
ulong
xorshift64star(ulong *s)
{
ulong x = s[0];
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
s[0] = x;
return x * (ulong)(0x2545f4914f6cdd1d);
}
__device__
ulong xor64_device(uint4 *d, uint *t) {
*t = d->x ^ (d->x << 11);
d->x = d->y; d->y = d->z; d->z= d->w;
d->w = d->w ^ (d->w >> 19) ^ ((*t) ^ ((*t) >> 8));
return d->w;
}
typedef ulong uint64_t ;
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
__device__
static inline uint64_t splitmix64(ulong *nb) {
uint64_t z = ((*nb) += UINT64_C(0x9E3779B97F4A7C15));
z = (z ^ (z >> 30)) * UINT64_C(0xBF58476D1CE4E5B9);
z = (z ^ (z >> 27)) * UINT64_C(0x94D049BB133111EB);
return z ^ (z >> 31);
}
extern __shared__ unsigned long shmem[];
const int size_pbox=32;
const int nb_sbox=16;
static int width;
static int height;
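// Stream-cipher style kernel: each thread advances its own xoroshiro128+ state, publishes the 64-bit
// output in shared memory, mixes it with two neighbours selected through the key-dependent P-boxes,
// substitutes each byte through the S-box, and XORs the result with the corresponding word of d_seq.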
__global__
void prng_kernel( ulong2 *dpcg, uchar * __restrict__ Pbox, uchar * __restrict__ Sbox2, ulong *d_random, ulong * __restrict__ d_seq,int nb_ele,int nb) {
uint i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<nb_ele) {
ulong2 pcg=dpcg[i];
ulong w,w2,res;
uchar* res2;
unsigned offset=threadIdx.x & (size_pbox-1);
int n;
unsigned base=threadIdx.x-offset;
uchar *Sbox;
//Sbox=&Sbox2[256*(pcg.x&(nb_sbox-1))];
//Sbox=&Sbox2[256*(threadIdx.x&(nb_sbox-1))];
Sbox=Sbox2;
for(int j=0;j<nb;j++) {
// w=splitmix64(&pcg.x);
//w=xorshift64star(&pcg.x);
int o0=base+ Pbox[size_pbox*(pcg.x&15)+offset];
int o1=base+ Pbox[size_pbox*(16+(pcg.y&15))+offset];
w=xoroshiro128plus(&pcg);
//too slow
/*w=pcg32_random_r(&pcg);
w=w<<32;
w=w|pcg32_random_r(&pcg);
*/
shmem[threadIdx.x]=w;
w2=xorshift64(w);
// __syncthreads();
w2=w2^shmem[o0]^shmem[o1];
res= w^w2;
// if(i==0)
// printf("%u\n",(w&(nb_sbox-1))<<7);
res2=(uchar*)&res;
res2[0]=Sbox[res2[0]];
res2[1]=Sbox[res2[1]];
res2[2]=Sbox[res2[2]];
res2[3]=Sbox[res2[3]];
res2[4]=Sbox[res2[4]];
res2[5]=Sbox[res2[5]];
res2[6]=Sbox[res2[6]];
res2[7]=Sbox[res2[7]];
/*if(i==0)
printf("%u\n",res);
*/
d_random[i+j*nb_ele]= res^d_seq[i+j*nb_ele];
}
dpcg[i]=pcg;
}
}
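// RC4-style key schedules: rc4keyperm builds a key-dependent permutation of 'len' values (used to fill
// the P-boxes); rc4key builds a 256-entry substitution box (S-box) from an 8-byte key.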
void rc4keyperm(uchar *key,int len, int rp,uchar *sc, int size_DK) {
//sc=1:len;
for (int i=0;i<len;i++) {
sc[i]=i;
}
for (int it = 0; it < rp; it++) {
int j0 = 1;
for(int i0 = 0; i0<len; i0++) {
j0 = (j0 + sc[i0] + sc[j0] + key[i0%size_DK] )% len;
int tmp = sc[i0];
sc[i0] = sc[j0];
sc[j0] = tmp;
}
}
}
void rc4key(uchar *key, uchar *sc, int size_DK) {
for(int i=0;i<256;i++) {
sc[i]=i;
}
uchar j0 = 0;
for(int i0=0; i0<256; i0++) {
j0 = (j0 + sc[i0] + key[i0%size_DK] )&0xFF;
uchar tmp = sc[i0];
sc[i0] = sc[j0 ];
sc[j0] = tmp;
}
}
uint test(int argc, char** argv)
{
/*
static ulong t=122190821;
t=xor128(t);
return (uint)t;
*/
static int str=0, old_str=0;
static ulong need_generation=1;
static ushort init=1;
ulong dum,j;
static uchar *data_R, *data_G, *data_B;
static long imsize;
static uchar* seq;
static uchar* seq2;
static ulong* d_seq;
static int oneD;
static uchar *Pbox;
static uchar *Sbox;
if(init==1) {
// h_val=(ulong2*)malloc(nb_ele*sizeof(ulong2));
h_val=(ulong2*)malloc(nb_ele*sizeof(ulong2));
Pbox=new uchar[32*size_pbox];
Sbox=new uchar[256*nb_sbox];
hipHostMalloc((void**)&h_random,nb_ele*nb*sizeof(ulong));
//ulong myseed=121;
ulong s1,s2;
sscanf(argv[3], "%lu", &s1);
sscanf(argv[4], "%lu", &s2);
for(int i=0;i<32;i++) {
ulong val[2];
val[0]=xorshift64star(&s1);
val[1]=xorshift64star(&s2);
uchar *DK=(uchar*)val;
rc4keyperm(DK, size_pbox, 1, &Pbox[size_pbox*i], 16);
}
for(int i=0;i<nb_sbox;i++)
rc4key(&Pbox[i*8], &Sbox[256*i], 8);
//for(int i=0;i<32;i++) {
//for(int j=0;j<size_pbox;j++)
//printf("%u ",Pbox[size_pbox*i+j]);
//printf("\n\n");
//}
printf("\n %lu %lu \n",s1,s2);
for(int i=0;i<nb_ele;i++) {
h_val[i].x=xorshift64star(&s1);
h_val[i].y=xorshift64star(&s2);
if(i==0) {
//printf("%lu %lu\n",h_val[i].x,h_val[i].y);
}
}
hipMalloc((void**) &d_random, nb_ele*nb*sizeof(ulong)) ;
hipMalloc((void**) &d_Pbox,size_pbox*32*sizeof(uchar)) ;
hipMalloc((void**) &d_Sbox,nb_sbox*256*sizeof(uchar)) ;
hipMalloc((void**) &d_val, nb_ele*sizeof(ulong2)) ;
hipMemcpy(d_val, h_val, nb_ele*sizeof(ulong2), hipMemcpyHostToDevice) ;
hipMemcpy(d_Pbox, Pbox, 32*size_pbox*sizeof(uchar), hipMemcpyHostToDevice) ;
hipMemcpy(d_Sbox, Sbox, nb_sbox*256*sizeof(uchar), hipMemcpyHostToDevice) ;
/*if(size==32768) {
load_RGB_pixmap("32768.ppm", &width, &height, &data_R, &data_G, &data_B);
// width=height=32768;
}
if(size==16384)
load_RGB_pixmap("16384.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==8192)
load_RGB_pixmap("8192.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==4096)
load_RGB_pixmap("4096.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==2048)
load_RGB_pixmap("2048.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==1024)
load_RGB_pixmap("1024.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==512)
load_RGB_pixmap("lena.ppm", &width, &height, &data_R, &data_G, &data_B);
*/
// store_RGB_pixmap("test.ppm", data_R, data_G, data_B, width, height);
imsize=(long)width*(long)height*3;
printf("size image %ld\n",imsize);
printf("eee1\n");
seq= new uchar[imsize];
printf("eee2\n");
seq2= new uchar[imsize];
printf("eee3\n");
oneD=width*height;
printf("size %d %d\n",width,height);
//if(size!=32768) {
for(int i=0;i<oneD;i++) {
seq[i]= 1;//data_R[i];
seq[oneD+i]=128; //data_G[i];
seq[2*oneD+i]= 255; //data_B[i];
}
/* }
else {
}*/
int val=hipMalloc((void**)&d_seq,imsize*sizeof(uchar));
// printf("malloc %d\n",val);
if(val==hipSuccess)
printf("OK \n",val);
val=hipMemcpy(d_seq,seq, imsize*sizeof(uchar), hipMemcpyHostToDevice);
//printf("memcpy %d\n",val);
if(val==hipSuccess)
printf("OK \n",val);
init=0;
}
/* hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
*/
if(need_generation==1) {
hipEvent_t start, stop;
float time;
hipLaunchKernelGGL(( prng_kernel), dim3(blocks),dim3(ssize),ssize*8, 0, d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
printf("nb blocks %d nb thd blocks %d\n",blocks,ssize);
//hipEventCreate(&start);
//hipEventCreate(&stop);
//hipEventRecord(start, 0);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
for(int i=0;i<100;i++) {
sdkStartTimer(&timer);
hipLaunchKernelGGL(( prng_kernel), dim3(blocks),dim3(ssize),ssize*8, 0, d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
// hipDeviceSynchronize();
hipDeviceSynchronize();
sdkStopTimer(&timer);
}
time = sdkGetAverageTimerValue(&timer);
//hipEventRecord(stop, 0);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&time, start, stop);
printf("GPU processing time : %f s \n", time/1000);
printf("Throughput %f Gbits/s\n", ((double)width*height*3*8)/time/1e6);
printf("image size : %ld Bytes \n", (long) width*height*3);
// hipMemcpy(h_random, d_random, nb_ele*nb*sizeof(uint), hipMemcpyDeviceToHost) ;
hipMemcpy(seq2, d_random, nb_ele*nb*sizeof(ulong), hipMemcpyDeviceToHost) ;
//if(size!=32768) {
//for(int i=0;i<oneD;i++) {
//data_R[i]=seq2[i];
//data_G[i]=seq2[oneD+i];
//data_B[i]=seq2[2*oneD+i];
//}
// store_RGB_pixmap("lena2.ppm", data_R, data_G, data_B, width, height);
//}
hipMemcpy(d_val, h_val, nb_ele*sizeof(ulong2), hipMemcpyHostToDevice) ;
// hipMemcpy(d_val2, h_val2, nb_ele*sizeof(uint4), hipMemcpyHostToDevice) ;
hipMemcpy(d_seq,seq2, imsize*sizeof(uchar), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( prng_kernel), dim3(blocks),dim3(ssize),ssize*8, 0, d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
hipLaunchKernelGGL(( prng_kernel), dim3(blocks),dim3(ssize),ssize*8, 0, d_val, d_Pbox, d_Sbox, d_random, d_seq,nb_ele,nb);
hipMemcpy(seq2, d_random, nb_ele*nb*sizeof(ulong), hipMemcpyDeviceToHost) ;
//if(size!=32768) {
//for(int i=0;i<oneD;i++) {
//data_R[i]=seq2[i];
//data_G[i]=seq2[oneD+i];
//data_B[i]=seq2[2*oneD+i];
//}
//store_RGB_pixmap("lena3.ppm", data_R, data_G, data_B, width, height);
//}
/*for(int i=0;i<100;i++) {
printf("%d ",h_random[i]);
}
printf("\n");
*/
//nb_numbers+=nb*nb_ele;
need_generation=1+nb*nb_ele;
}
// printf("ici\n");
/* hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("CPU processing time : %f (ms) \n", time);
*/
need_generation--;
//printf("%d\n ", h_random[nb*nb_ele-(need_generation)]);
return h_random[nb*nb_ele-(need_generation)];
}
int main (int argc, char** argv)
{
/*
const int size=1024;
const int sizeMat=size*size;
float *h_arrayA=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayB=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayC=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayCgpu=(float*)malloc(sizeMat*sizeof(float));
srand48(32);
for(int i=0;i<sizeMat;i++) {
h_arrayA[i]=drand48();
h_arrayB[i]=drand48();
h_arrayC[i]=0;
h_arrayCgpu[i]=0;
}
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for(int i=0;i<size;i++) {
for(int j=0;j<size;j++) {
for(int k=0;k<size;k++) {
h_arrayC[size*i+j]+=h_arrayA[size*i+k]*h_arrayB[\
size*k+j];
}
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("CPU processing time : %f (ms) \n", time);
*/
if(argc!=5) {
printf("%s size nb random1 random2\n",argv[0]);
exit(0);
}
size=atoi(argv[1]);
width =size;
height=size;
nb=atoi(argv[2]);
printf("size image %d\n",size);
if(size!=512 && size!=1024 && size!=2048 && size!=4096 && size!=8192 && size!=16384 && size!=32768) {
printf("wrong size\n");
exit(0);
}
if(nb<1 || nb>32768) {
printf("nb not good\n");
exit(0);
}
printf("nb %d\n",nb);
nb_ele=size*size/8*3/nb;
printf("nb_ele %d\n",nb_ele);
blocks=(nb_ele+ssize-1)/ssize;
test(argc, argv);
printf("nb numbers %lu\n",nb_numbers);
/*
unif01_Gen *gen;
gen = unif01_CreateExternGenBits ("raph", test);
bbattery_BigCrush (gen);
unif01_DeleteExternGenBits (gen);
*/
return 0;
}
| dcbcc2767fc27109848bb68ef63245e1f98286a7.cu | //nvcc prng21.cu -I/home/couturie/TestU01-inst/include -I/home/couturie/NVIDIA_GPU_Computing_SDK/CUDALibraries/common/inc/ -o prng21 -ltestu01 -lprobdist -lmylib -lm -L/usr/local/cuda/lib64 -lcuda -lcudart
//nvcc perf_opti_bbs2.cu -I ~/TestU01-inst2/include -I ~/NVIDIA_GPU_Computing_SDK/C/common/inc/ -o perf_opti_bbs2 -ltestu01 -lprobdist -lmylib -lm -L/usr/local/cuda/lib64 -lcuda -lcudart -L/$HOME/NVIDIA_GPU_Computing_SDK/C/lib -lcutil -arch=sm_13 -O3
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <cuda_runtime.h>
/*
extern "C" {
#include "unif01.h"
#include "bbattery.h"
}
*/
//extern "C" {
// int load_RGB_pixmap(char *filename, int *width, int *height, unsigned char**R_data, unsigned char**G_data, unsigned char**B_data);
//void store_RGB_pixmap(char *filename, unsigned char *R_data, unsigned char *G_data, unsigned char *B_data, int width, int height);
//}
typedef unsigned char uchar;
int nb=64;//512*2;
uint size=512;
//const uint nb_ele=8192*8192/4*3;//1024*1024*2;
//const uint nb_ele=9000*9000/4*3;//1024*1024*2;
uint nb_ele;
const uint ssize=512;
uint blocks;
ulong nb_numbers=0;
typedef unsigned int uint;
typedef unsigned long ulong;
typedef unsigned short ushort;
__device__ inline ulong rotl(const ulong x, int k) {
return (x << k) | (x >> (64 - k));
}
__device__ inline
ulong xoroshiro128plus(ulong2* rng) {
const ulong s0 = rng->x;
ulong s1 = rng->y;
const ulong result = rng->x + rng->y;
s1 ^= s0;
rng->x = rotl(s0, 24) ^ s1 ^ (s1 << 16); // a, b
rng->y = rotl(s1, 37); // c
return result;
}
__device__ inline
ulong xorshift64(ulong t)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
ulong x = t;
x ^= x >> 12; // a
x ^= x << 25; // b
x ^= x >> 27; // c
return x;
}
ulong xor128(ulong t) {
static uint x = 123456789,
y = 362436069,
z = 521288629,
w = 88675123;
t = x ^ (x << 11);
x = y; y = z; z = w;
w = w ^ (w >> 19) ^ (t ^ (t >> 8));
//printf("%u %u %u %u %u\n",x,y,z,w,t);
return w;
}
ulong *d_random;
uchar *d_Pbox;
uchar *d_Sbox;
ulong2 *d_val;
uint *h_v;
uint *h_x;
uint *h_random;
ulong2 *h_val;
//typedef struct { ulong state; ulong inc; } pcg32_random_t;
__device__ inline
uint pcg32_random_r(ulong2* rng)
{
// pcg32_random_t *rng=(pcg32_random_t*)rng2;
ulong oldstate = rng->x;
// Advance internal state
rng->x = oldstate * 6364136223846793005ULL + (rng->y|1);
// Calculate output function (XSH RR), uses old state for max ILP
uint xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u;
uint rot = oldstate >> 59u;
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
}
__device__ __host__ inline
uint xorshift32(const uint t)
{
/* Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" */
uint x = t;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
return x;
}
__device__
ulong xor128_device(ulong4 *d) {
ulong t = d->x ^ (d->x << 11);
d->x = d->y; d->y = d->z; d->z = d->w;
d->w = d->w ^ (d->w >> 19) ^ (t ^ (t >> 8));
return d->w;
}
__device__
ulong xorshift_device(ulong *v, ulong4 *d) {
ulong t = d->x^(d->x>>7);
d->x=d->y; d->y=d->z; d->z=d->w; d->w=*v;
*v=(*v^(*v<<6))^(t^(t<<13));
return (d->y+d->y+1)*(*v);
}
__device__
unsigned long xorwow_device(ulong2 *v, ulong4 *d){
ulong t=(d->x^(d->x>>2));
d->x=d->y; d->y=d->z; d->z=d->w; d->w=v->x;
v->x=(v->x^(v->x<<4))^(t^(t<<1));
return (v->y+=362437)+v->x;
}
__device__ __host__
ulong
xorshift64star(ulong *s)
{
ulong x = s[0];
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
s[0] = x;
return x * (ulong)(0x2545f4914f6cdd1d);
}
__device__
ulong xor64_device(uint4 *d, uint *t) {
*t = d->x ^ (d->x << 11);
d->x = d->y; d->y = d->z; d->z= d->w;
d->w = d->w ^ (d->w >> 19) ^ ((*t) ^ ((*t) >> 8));
return d->w;
}
typedef ulong uint64_t ;
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
__device__
static inline uint64_t splitmix64(ulong *nb) {
uint64_t z = ((*nb) += UINT64_C(0x9E3779B97F4A7C15));
z = (z ^ (z >> 30)) * UINT64_C(0xBF58476D1CE4E5B9);
z = (z ^ (z >> 27)) * UINT64_C(0x94D049BB133111EB);
return z ^ (z >> 31);
}
extern __shared__ unsigned long shmem[];
const int size_pbox=32;
const int nb_sbox=16;
static int width;
static int height;
__global__
void prng_kernel( ulong2 *dpcg, uchar * __restrict__ Pbox, uchar * __restrict__ Sbox2, ulong *d_random, ulong * __restrict__ d_seq,int nb_ele,int nb) {
uint i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<nb_ele) {
ulong2 pcg=dpcg[i];
ulong w,w2,res;
uchar* res2;
unsigned offset=threadIdx.x & (size_pbox-1);
int n;
unsigned base=threadIdx.x-offset;
uchar *Sbox;
//Sbox=&Sbox2[256*(pcg.x&(nb_sbox-1))];
//Sbox=&Sbox2[256*(threadIdx.x&(nb_sbox-1))];
Sbox=Sbox2;
for(int j=0;j<nb;j++) {
// w=splitmix64(&pcg.x);
//w=xorshift64star(&pcg.x);
int o0=base+ Pbox[size_pbox*(pcg.x&15)+offset];
int o1=base+ Pbox[size_pbox*(16+(pcg.y&15))+offset];
w=xoroshiro128plus(&pcg);
//too slow
/*w=pcg32_random_r(&pcg);
w=w<<32;
w=w|pcg32_random_r(&pcg);
*/
shmem[threadIdx.x]=w;
w2=xorshift64(w);
// __syncthreads();
w2=w2^shmem[o0]^shmem[o1];
res= w^w2;
// if(i==0)
// printf("%u\n",(w&(nb_sbox-1))<<7);
res2=(uchar*)&res;
res2[0]=Sbox[res2[0]];
res2[1]=Sbox[res2[1]];
res2[2]=Sbox[res2[2]];
res2[3]=Sbox[res2[3]];
res2[4]=Sbox[res2[4]];
res2[5]=Sbox[res2[5]];
res2[6]=Sbox[res2[6]];
res2[7]=Sbox[res2[7]];
/*if(i==0)
printf("%u\n",res);
*/
d_random[i+j*nb_ele]= res^d_seq[i+j*nb_ele];
}
dpcg[i]=pcg;
}
}
void rc4keyperm(uchar *key,int len, int rp,uchar *sc, int size_DK) {
//sc=1:len;
for (int i=0;i<len;i++) {
sc[i]=i;
}
for (int it = 0; it < rp; it++) {
int j0 = 1;
for(int i0 = 0; i0<len; i0++) {
j0 = (j0 + sc[i0] + sc[j0] + key[i0%size_DK] )% len;
int tmp = sc[i0];
sc[i0] = sc[j0];
sc[j0] = tmp;
}
}
}
void rc4key(uchar *key, uchar *sc, int size_DK) {
for(int i=0;i<256;i++) {
sc[i]=i;
}
uchar j0 = 0;
for(int i0=0; i0<256; i0++) {
j0 = (j0 + sc[i0] + key[i0%size_DK] )&0xFF;
uchar tmp = sc[i0];
sc[i0] = sc[j0 ];
sc[j0] = tmp;
}
}
uint test(int argc, char** argv)
{
/*
static ulong t=122190821;
t=xor128(t);
return (uint)t;
*/
static int str=0, old_str=0;
static ulong need_generation=1;
static ushort init=1;
ulong dum,j;
static uchar *data_R, *data_G, *data_B;
static long imsize;
static uchar* seq;
static uchar* seq2;
static ulong* d_seq;
static int oneD;
static uchar *Pbox;
static uchar *Sbox;
if(init==1) {
// h_val=(ulong2*)malloc(nb_ele*sizeof(ulong2));
h_val=(ulong2*)malloc(nb_ele*sizeof(ulong2));
Pbox=new uchar[32*size_pbox];
Sbox=new uchar[256*nb_sbox];
cudaMallocHost((void**)&h_random,nb_ele*nb*sizeof(ulong));
//ulong myseed=121;
ulong s1,s2;
sscanf(argv[3], "%lu", &s1);
sscanf(argv[4], "%lu", &s2);
for(int i=0;i<32;i++) {
ulong val[2];
val[0]=xorshift64star(&s1);
val[1]=xorshift64star(&s2);
uchar *DK=(uchar*)val;
rc4keyperm(DK, size_pbox, 1, &Pbox[size_pbox*i], 16);
}
for(int i=0;i<nb_sbox;i++)
rc4key(&Pbox[i*8], &Sbox[256*i], 8);
//for(int i=0;i<32;i++) {
//for(int j=0;j<size_pbox;j++)
//printf("%u ",Pbox[size_pbox*i+j]);
//printf("\n\n");
//}
printf("\n %lu %lu \n",s1,s2);
for(int i=0;i<nb_ele;i++) {
h_val[i].x=xorshift64star(&s1);
h_val[i].y=xorshift64star(&s2);
if(i==0) {
//printf("%lu %lu\n",h_val[i].x,h_val[i].y);
}
}
cudaMalloc((void**) &d_random, nb_ele*nb*sizeof(ulong)) ;
cudaMalloc((void**) &d_Pbox,size_pbox*32*sizeof(uchar)) ;
cudaMalloc((void**) &d_Sbox,nb_sbox*256*sizeof(uchar)) ;
cudaMalloc((void**) &d_val, nb_ele*sizeof(ulong2)) ;
cudaMemcpy(d_val, h_val, nb_ele*sizeof(ulong2), cudaMemcpyHostToDevice) ;
cudaMemcpy(d_Pbox, Pbox, 32*size_pbox*sizeof(uchar), cudaMemcpyHostToDevice) ;
cudaMemcpy(d_Sbox, Sbox, nb_sbox*256*sizeof(uchar), cudaMemcpyHostToDevice) ;
/*if(size==32768) {
load_RGB_pixmap("32768.ppm", &width, &height, &data_R, &data_G, &data_B);
// width=height=32768;
}
if(size==16384)
load_RGB_pixmap("16384.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==8192)
load_RGB_pixmap("8192.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==4096)
load_RGB_pixmap("4096.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==2048)
load_RGB_pixmap("2048.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==1024)
load_RGB_pixmap("1024.ppm", &width, &height, &data_R, &data_G, &data_B);
if(size==512)
load_RGB_pixmap("lena.ppm", &width, &height, &data_R, &data_G, &data_B);
*/
// store_RGB_pixmap("test.ppm", data_R, data_G, data_B, width, height);
imsize=(long)width*(long)height*3;
printf("size image %ld\n",imsize);
printf("eee1\n");
seq= new uchar[imsize];
printf("eee2\n");
seq2= new uchar[imsize];
printf("eee3\n");
oneD=width*height;
printf("size %d %d\n",width,height);
//if(size!=32768) {
for(int i=0;i<oneD;i++) {
seq[i]= 1;//data_R[i];
seq[oneD+i]=128; //data_G[i];
seq[2*oneD+i]= 255; //data_B[i];
}
/* }
else {
}*/
int val=cudaMalloc((void**)&d_seq,imsize*sizeof(uchar));
// printf("malloc %d\n",val);
if(val==cudaSuccess)
printf("OK \n",val);
val=cudaMemcpy(d_seq,seq, imsize*sizeof(uchar), cudaMemcpyHostToDevice);
//printf("memcpy %d\n",val);
if(val==cudaSuccess)
printf("OK \n",val);
init=0;
}
/* cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
*/
if(need_generation==1) {
cudaEvent_t start, stop;
float time;
prng_kernel<<<blocks,ssize,ssize*8>>>(d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
printf("nb blocks %d nb thd blocks %d\n",blocks,ssize);
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
//cudaEventRecord(start, 0);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
for(int i=0;i<100;i++) {
sdkStartTimer(&timer);
prng_kernel<<<blocks,ssize,ssize*8>>>(d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
// cudaThreadSynchronize();
cudaDeviceSynchronize();
sdkStopTimer(&timer);
}
time = sdkGetAverageTimerValue(&timer);
//cudaEventRecord(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&time, start, stop);
printf("GPU processing time : %f s \n", time/1000);
printf("Throughput %f Gbits/s\n", ((double)width*height*3*8)/time/1e6);
printf("image size : %ld Bytes \n", (long) width*height*3);
// cudaMemcpy(h_random, d_random, nb_ele*nb*sizeof(uint), cudaMemcpyDeviceToHost) ;
cudaMemcpy(seq2, d_random, nb_ele*nb*sizeof(ulong), cudaMemcpyDeviceToHost) ;
//if(size!=32768) {
//for(int i=0;i<oneD;i++) {
//data_R[i]=seq2[i];
//data_G[i]=seq2[oneD+i];
//data_B[i]=seq2[2*oneD+i];
//}
// store_RGB_pixmap("lena2.ppm", data_R, data_G, data_B, width, height);
//}
cudaMemcpy(d_val, h_val, nb_ele*sizeof(ulong2), cudaMemcpyHostToDevice) ;
// cudaMemcpy(d_val2, h_val2, nb_ele*sizeof(uint4), cudaMemcpyHostToDevice) ;
cudaMemcpy(d_seq,seq2, imsize*sizeof(uchar), cudaMemcpyHostToDevice);
prng_kernel<<<blocks,ssize,ssize*8>>>(d_val, d_Pbox, d_Sbox,d_random, d_seq,nb_ele,nb);
prng_kernel<<<blocks,ssize,ssize*8>>>( d_val, d_Pbox, d_Sbox, d_random, d_seq,nb_ele,nb);
cudaMemcpy(seq2, d_random, nb_ele*nb*sizeof(ulong), cudaMemcpyDeviceToHost) ;
//if(size!=32768) {
//for(int i=0;i<oneD;i++) {
//data_R[i]=seq2[i];
//data_G[i]=seq2[oneD+i];
//data_B[i]=seq2[2*oneD+i];
//}
//store_RGB_pixmap("lena3.ppm", data_R, data_G, data_B, width, height);
//}
/*for(int i=0;i<100;i++) {
printf("%d ",h_random[i]);
}
printf("\n");
*/
//nb_numbers+=nb*nb_ele;
need_generation=1+nb*nb_ele;
}
// printf("ici\n");
/* cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("CPU processing time : %f (ms) \n", time);
*/
need_generation--;
//printf("%d\n ", h_random[nb*nb_ele-(need_generation)]);
return h_random[nb*nb_ele-(need_generation)];
}
int main (int argc, char** argv)
{
/*
const int size=1024;
const int sizeMat=size*size;
float *h_arrayA=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayB=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayC=(float*)malloc(sizeMat*sizeof(float));
float *h_arrayCgpu=(float*)malloc(sizeMat*sizeof(float));
srand48(32);
for(int i=0;i<sizeMat;i++) {
h_arrayA[i]=drand48();
h_arrayB[i]=drand48();
h_arrayC[i]=0;
h_arrayCgpu[i]=0;
}
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for(int i=0;i<size;i++) {
for(int j=0;j<size;j++) {
for(int k=0;k<size;k++) {
h_arrayC[size*i+j]+=h_arrayA[size*i+k]*h_arrayB[\
size*k+j];
}
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("CPU processing time : %f (ms) \n", time);
*/
if(argc!=5) {
printf("%s size nb random1 random2\n",argv[0]);
exit(0);
}
size=atoi(argv[1]);
width =size;
height=size;
nb=atoi(argv[2]);
printf("size image %d\n",size);
if(size!=512 && size!=1024 && size!=2048 && size!=4096 && size!=8192 && size!=16384 && size!=32768) {
printf("wrong size\n");
exit(0);
}
if(nb<1 || nb>32768) {
printf("nb not good\n");
exit(0);
}
printf("nb %d\n",nb);
nb_ele=size*size/8*3/nb;
printf("nb_ele %d\n",nb_ele);
blocks=(nb_ele+ssize-1)/ssize;
test(argc, argv);
printf("nb numbers %lu\n",nb_numbers);
/*
unif01_Gen *gen;
gen = unif01_CreateExternGenBits ("raph", test);
bbattery_BigCrush (gen);
unif01_DeleteExternGenBits (gen);
*/
return 0;
}
|
1859633bca2e943cf8aed054d32bdb3e927882da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing proximity in fsi system.
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
// calcHashD :
// 1. Get particle index determined by the block and thread we are in.
// 2. From x, y, z position, determine which bin it is in.
// 3. Calculate hash from bin index.
// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, // gridMarkerHash Store particle hash here
uint* gridMarkerIndexD, // gridMarkerIndex Store particle index here
Real4* posRad, // posRad Vector containing the positions of all particles (SPH and BCE)
const size_t numAllMarkers, // Total number of particles (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See ChCollisionSystemFsi.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyzxyz
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyzxyz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
}
__global__ void findCellStartEndD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
}
}
__global__ void reorderDataD(uint* gridMarkerIndexD, // input: sorted particle indices
uint* extendedActivityIdD, // input: particles in an extended active sub-domain
uint* mapOriginalToSorted, // input: original index to sorted index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyzxyz
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyzxyz
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
// Now use the sorted index to reorder the pos and vel data
uint originalIndex = id;
// no need to do anything if it is not an active particle
uint activity = extendedActivityIdD[originalIndex];
if(activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[originalIndex];
Real3 posRad = mR3(posRadD[originalIndex]);
Real3 velMas = velMasD[originalIndex];
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
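// Build the inverse permutation of gridMarkerIndex: for each sorted slot id,
// gridMarkerIndex[id] is the original particle index, so record that this
// original index now lives at sorted position id.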
__global__ void OriginalToSortedD(uint* mapOriginalToSorted,
uint* gridMarkerIndex,
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
uint index = gridMarkerIndex[id];
mapOriginalToSorted[index] = id;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
sphMarkersD = NULL;
}
ChCollisionSystemFsi::~ChCollisionSystemFsi() {}
//--------------------------------------------------------------------------------------------------------------------------------
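// Copy the simulation parameters and object counts into the device-side
// constant symbols (paramsD, numObjectsD) used by the kernels above.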
void ChCollisionSystemFsi::Finalize() {
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads), 0, 0, U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD),
mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
hipDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
hipFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); // 256 is blockSize
uint smemSize = sizeof(uint) * (numThreads + 1);
// Find the start index and the end index of the sorted array in each cell
hipLaunchKernelGGL(( findCellStartEndD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
numObjectsH->numAllMarkers);
hipDeviceSynchronize();
cudaCheckError();
// Launch a kernel to find the location of original particles in the sorted arrays.
// This is faster than using thrust::sort_by_key()
hipLaunchKernelGGL(( OriginalToSortedD), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(markersProximityD->gridMarkerIndexD), numObjectsH->numAllMarkers);
// Reorder the arrays according to the sorted index of all particles
hipLaunchKernelGGL(( reorderDataD), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->extendedActivityIdD),
U1CAST(markersProximityD->mapOriginalToSorted),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedSphMarkersD->tauXxYyZzD),
mR3CAST(sortedSphMarkersD->tauXyXzYzD), mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
numObjectsH->numAllMarkers);
hipDeviceSynchronize();
cudaCheckError();
}
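// Top-level proximity update: hash all markers into grid cells, sort the
// (hash, index) pairs by hash, then reorder the marker data and record the
// start/end index of each cell.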
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
| 1859633bca2e943cf8aed054d32bdb3e927882da.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Milad Rakhsha, Wei Hu
// =============================================================================
//
// Base class for processing proximity in fsi system.
// =============================================================================
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChCollisionSystemFsi.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#include "chrono_fsi/utils/ChUtilsDevice.cuh"
namespace chrono {
namespace fsi {
// calcHashD :
// 1. Get particle index determined by the block and thread we are in.
// 2. From x, y, z position, determine which bin it is in.
// 3. Calculate hash from bin index.
// 4. Store hash and particle index associated with it.
__global__ void calcHashD(
uint* gridMarkerHashD, // gridMarkerHash Store particle hash here
uint* gridMarkerIndexD, // gridMarkerIndex Store particle index here
Real4* posRad, // posRad Vector containing the positions of all particles (SPH and BCE)
const size_t numAllMarkers, // Total number of particles (fluid + boundary)
volatile bool* isErrorD) {
/* Calculate the index of where the particle is stored in posRad. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real3 p = mR3(posRad[index]);
if (!(isfinite(p.x) && isfinite(p.y) && isfinite(p.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, calcHashD !\n");
*isErrorD = true;
return;
}
/* Check particle is inside the domain. */
Real3 boxCorner = paramsD.worldOrigin - mR3(40 * paramsD.HSML);
if (p.x < boxCorner.x || p.y < boxCorner.y || p.z < boxCorner.z) {
printf(
"Out of Min Boundary, point %f %f %f, boundary min: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
boxCorner = paramsD.worldOrigin + paramsD.boxDims + mR3(40 * paramsD.HSML);
if (p.x > boxCorner.x || p.y > boxCorner.y || p.z > boxCorner.z) {
printf(
"Out of max Boundary, point %f %f %f, boundary max: %f %f %f. "
"Thrown from ChCollisionSystemFsi.cu, calcHashD !\n",
p.x, p.y, p.z, boxCorner.x, boxCorner.y, boxCorner.z);
*isErrorD = true;
return;
}
/* Get x,y,z bin index in grid */
int3 gridPos = calcGridPos(p);
/* Calculate a hash from the bin index */
uint hash = calcGridHash(gridPos);
/* Store grid hash */
gridMarkerHashD[index] = hash;
/* Store particle index associated to the hash we stored in gridMarkerHashD */
gridMarkerIndexD[index] = index;
}
/**
* @brief reorderDataAndFindCellStartD
* @details See ChCollisionSystemFsi.cuh for more info
*/
__global__ void reorderDataAndFindCellStartD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyzxyz
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyzxyz
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
/* Now use the sorted index to reorder the pos and vel data */
uint originalIndex = gridMarkerIndexD[index]; // map sorted to original
Real3 posRad = mR3(posRadD[originalIndex]); // macro does either global read or
// texture fetch
Real3 velMas = velMasD[originalIndex]; // see particles_kernel.cuh
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
}
__global__ void findCellStartEndD(uint* cellStartD, // output: cell start index
uint* cellEndD, // output: cell end index
uint* gridMarkerHashD, // input: sorted grid hashes
uint* gridMarkerIndexD, // input: sorted particle indices
const size_t numAllMarkers) {
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
/* Get the particle index the current thread is supposed to be looking at. */
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint hash;
/* handle case when no. of particles not multiple of block size */
if (index < numAllMarkers) {
hash = gridMarkerHashD[index];
/* Load hash data into shared memory so that we can look at neighboring
* particle's hash
* value without loading two hash values per thread
*/
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0) {
/* first thread in block must load neighbor particle hash */
sharedHash[0] = gridMarkerHashD[index - 1];
}
}
__syncthreads();
if (index < numAllMarkers) {
/* If this particle has a different cell index to the previous particle then
* it must be
* the first particle in the cell, so store the index of this particle in
* the cell. As it
* isn't the first particle, it must also be the cell end of the previous
* particle's cell
*/
if (index == 0 || hash != sharedHash[threadIdx.x]) {
cellStartD[hash] = index;
if (index > 0)
cellEndD[sharedHash[threadIdx.x]] = index;
}
if (index == numAllMarkers - 1) {
cellEndD[hash] = index + 1;
}
}
}
__global__ void reorderDataD(uint* gridMarkerIndexD, // input: sorted particle indices
uint* extendedActivityIdD, // input: particles in an extended active sub-domain
uint* mapOriginalToSorted, // input: original index to sorted index
Real4* sortedPosRadD, // output: sorted positions
Real3* sortedVelMasD, // output: sorted velocities
Real4* sortedRhoPreMuD, // output: sorted density pressure
Real3* sortedTauXxYyZzD, // output: sorted total stress xxyyzz
Real3* sortedTauXyXzYzD, // output: sorted total stress xyzxyz
Real4* posRadD, // input: original position array
Real3* velMasD, // input: original velocity array
Real4* rhoPresMuD, // input: original density pressure
Real3* tauXxYyZzD, // input: original total stress xxyyzz
Real3* tauXyXzYzD, // input: original total stress xyzxyz
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
// Now use the sorted index to reorder the pos and vel data
uint originalIndex = id;
// no need to do anything if it is not an active particle
uint activity = extendedActivityIdD[originalIndex];
if(activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[originalIndex];
Real3 posRad = mR3(posRadD[originalIndex]);
Real3 velMas = velMasD[originalIndex];
Real4 rhoPreMu = rhoPresMuD[originalIndex];
if (!(isfinite(posRad.x) && isfinite(posRad.y) && isfinite(posRad.z))) {
printf(
"Error! particle position is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(velMas.x) && isfinite(velMas.y) && isfinite(velMas.z))) {
printf(
"Error! particle velocity is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(rhoPreMu.x) && isfinite(rhoPreMu.y) && isfinite(rhoPreMu.z) && isfinite(rhoPreMu.w))) {
printf(
"Error! particle rhoPreMu is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedPosRadD[index] = mR4(posRad, posRadD[originalIndex].w);
sortedVelMasD[index] = velMas;
sortedRhoPreMuD[index] = rhoPreMu;
// For granular material
if( paramsD.elastic_SPH ) {
Real3 tauXxYyZz = tauXxYyZzD[originalIndex];
Real3 tauXyXzYz = tauXyXzYzD[originalIndex];
if (!(isfinite(tauXxYyZz.x) && isfinite(tauXxYyZz.y) && isfinite(tauXxYyZz.z))) {
printf(
"Error! particle tauXxYyZz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
if (!(isfinite(tauXyXzYz.x) && isfinite(tauXyXzYz.y) && isfinite(tauXyXzYz.z))) {
printf(
"Error! particle tauXyXzYz is NAN: thrown from "
"ChCollisionSystemFsi.cu, reorderDataAndFindCellStartD !\n");
}
sortedTauXxYyZzD[index] = tauXxYyZz;
sortedTauXyXzYzD[index] = tauXyXzYz;
}
}
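// Build the inverse permutation of gridMarkerIndex: for each sorted slot id,
// gridMarkerIndex[id] is the original particle index, so record that this
// original index now lives at sorted position id.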
__global__ void OriginalToSortedD(uint* mapOriginalToSorted,
uint* gridMarkerIndex,
const size_t numAllMarkers) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numAllMarkers)
return;
uint index = gridMarkerIndex[id];
mapOriginalToSorted[index] = id;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChCollisionSystemFsi::ChCollisionSystemFsi(std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: sortedSphMarkersD(otherSortedSphMarkersD),
markersProximityD(otherMarkersProximityD),
fsiGeneralData(otherFsiGeneralData),
paramsH(otherParamsH),
numObjectsH(otherNumObjects) {
sphMarkersD = NULL;
}
ChCollisionSystemFsi::~ChCollisionSystemFsi() {}
//--------------------------------------------------------------------------------------------------------------------------------
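// Copy the simulation parameters and object counts into the device-side
// constant symbols (paramsD, numObjectsD) used by the kernels above.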
void ChCollisionSystemFsi::Finalize() {
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChCollisionSystemFsi::calcHash() {
if (!(markersProximityD->gridMarkerHashD.size() == numObjectsH->numAllMarkers &&
markersProximityD->gridMarkerIndexD.size() == numObjectsH->numAllMarkers)) {
printf(
"mError! calcHash!, gridMarkerHashD.size() %zu "
"gridMarkerIndexD.size() %zu numObjectsH->numAllMarkers %zu \n",
markersProximityD->gridMarkerHashD.size(), markersProximityD->gridMarkerIndexD.size(),
numObjectsH->numAllMarkers);
throw std::runtime_error("Error! size error, calcHash!");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
/* Is there a need to optimize the number of threads used at once? */
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
/* Execute Kernel */
calcHashD<<<numBlocks, numThreads>>>(U1CAST(markersProximityD->gridMarkerHashD),
U1CAST(markersProximityD->gridMarkerIndexD),
mR4CAST(sphMarkersD->posRadD),
numObjectsH->numAllMarkers, isErrorD);
/* Check for errors in kernel execution */
cudaDeviceSynchronize();
cudaCheckError();
//------------------------------------------------------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in calcHashD!\n");
}
cudaFree(isErrorD);
free(isErrorH);
}
void ChCollisionSystemFsi::ResetCellSize(int s) {
markersProximityD->cellStartD.resize(s);
markersProximityD->cellEndD.resize(s);
}
void ChCollisionSystemFsi::reorderDataAndFindCellStart() {
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
if (!(markersProximityD->cellStartD.size() == numCells && markersProximityD->cellEndD.size() == numCells)) {
throw std::runtime_error("Error! size error, reorderDataAndFindCellStart!\n");
}
thrust::fill(markersProximityD->cellStartD.begin(), markersProximityD->cellStartD.end(), 0);
thrust::fill(markersProximityD->cellEndD.begin(), markersProximityD->cellEndD.end(), 0);
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); // 256 is blockSize
uint smemSize = sizeof(uint) * (numThreads + 1);
// Find the start index and the end index of the sorted array in each cell
findCellStartEndD<<<numBlocks, numThreads, smemSize>>>(
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
U1CAST(markersProximityD->gridMarkerHashD), U1CAST(markersProximityD->gridMarkerIndexD),
numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
// Launch a kernel to find the location of original particles in the sorted arrays.
// This is faster than using thrust::sort_by_key()
OriginalToSortedD<<<numBlocks, numThreads>>>(
U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(markersProximityD->gridMarkerIndexD), numObjectsH->numAllMarkers);
// Reorder the arrays according to the sorted index of all particles
reorderDataD<<<numBlocks, numThreads>>>(
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->extendedActivityIdD),
U1CAST(markersProximityD->mapOriginalToSorted),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedSphMarkersD->tauXxYyZzD),
mR3CAST(sortedSphMarkersD->tauXyXzYzD), mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD),
numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
}
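// Top-level proximity update: hash all markers into grid cells, sort the
// (hash, index) pairs by hash, then reorder the marker data and record the
// start/end index of each cell.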
void ChCollisionSystemFsi::ArrangeData(std::shared_ptr<SphMarkerDataD> otherSphMarkersD) {
sphMarkersD = otherSphMarkersD;
int3 cellsDim = paramsH->gridSize;
int numCells = cellsDim.x * cellsDim.y * cellsDim.z;
ResetCellSize(numCells);
calcHash();
thrust::sort_by_key(markersProximityD->gridMarkerHashD.begin(), markersProximityD->gridMarkerHashD.end(),
markersProximityD->gridMarkerIndexD.begin());
reorderDataAndFindCellStart();
}
} // end namespace fsi
} // end namespace chrono
|
2c467f4642f9cc3496c2adb374fa5c52467fe547.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlascl_2x2.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
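// The kernels below apply the inverse of the symmetric 2x2 pivot block stored
// in W to an m x 2 panel of W, writing the result into the first two columns
// of A (see magmablas_dlascl_2x2_q below for the host-side entry point).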
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_2x2_full(int m, const double* W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_2x2_lower(int m, const double* W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_2x2_upper(int m, const double *W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( m, 1 );
double D11 = MAGMA_D_DIV( W( m+1, 1 ), MAGMA_D_CNJG( D21 ) );
double D22 = MAGMA_D_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_D_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
    DLASCL_2x2 applies the inverse of a symmetric 2x2 pivot block, stored in
    the workspace W, to an M by 2 panel of W and writes the result into the
    first two columns of A.
    TYPE specifies whether the pivot data is laid out for the lower or the
    upper triangular case.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
    \param[in]
    dW      DOUBLE PRECISION array, dimension (LDDW,2)
            Workspace holding the 2x2 pivot block together with the m by 2
            panel to which its inverse is applied.
    \param[in]
    lddw    INTEGER
            The leading dimension of the array W.
    \param[in,out]
    dA      DOUBLE PRECISION array, dimension (LDDA,2)
            On exit, the first two columns contain the panel scaled by the
            inverse of the pivot block.  See TYPE for the storage layout.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_2x2_q(
magma_type_t type, magma_int_t m,
const double *dW, magma_int_t lddw,
double *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
else {
hipLaunchKernelGGL(( dlascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
}
/**
    @see magmablas_dlascl_2x2_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_2x2(
magma_type_t type, magma_int_t m,
double *dW, magma_int_t lddw,
double *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_dlascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
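/*
    Example call (illustrative sketch only; dW, lddw, dA and ldda are assumed
    to be device data and strides already set up by the caller):

        magma_int_t info;
        magmablas_dlascl_2x2( MagmaLower, m, dW, lddw, dA, ldda, &info );
*/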
| 2c467f4642f9cc3496c2adb374fa5c52467fe547.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zlascl_2x2.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
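// The kernels below apply the inverse of the symmetric 2x2 pivot block stored
// in W to an m x 2 panel of W, writing the result into the first two columns
// of A (see magmablas_dlascl_2x2_q below for the host-side entry point).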
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
dlascl_2x2_full(int m, const double* W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_2x2_lower(int m, const double* W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( 1, 0 );
double D11 = MAGMA_D_DIV( W( 1, 1 ), D21 );
double D22 = MAGMA_D_DIV( W( 0, 0 ), MAGMA_D_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_D_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_2x2_upper(int m, const double *W, int ldw, double* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double D21 = W( m, 1 );
double D11 = MAGMA_D_DIV( W( m+1, 1 ), MAGMA_D_CNJG( D21 ) );
double D22 = MAGMA_D_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_D_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_D_DIV( MAGMA_D_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_D_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
    DLASCL_2x2 applies the inverse of a symmetric 2x2 pivot block, stored in
    the workspace W, to an M by 2 panel of W and writes the result into the
    first two columns of A.
    TYPE specifies whether the pivot data is laid out for the lower or the
    upper triangular case.
Arguments
---------
\param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
    \param[in]
    dW      DOUBLE PRECISION array, dimension (LDDW,2)
            Workspace holding the 2x2 pivot block together with the m by 2
            panel to which its inverse is applied.
    \param[in]
    lddw    INTEGER
            The leading dimension of the array W.
    \param[in,out]
    dA      DOUBLE PRECISION array, dimension (LDDA,2)
            On exit, the first two columns contain the panel scaled by the
            inverse of the pivot block.  See TYPE for the storage layout.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_2x2_q(
magma_type_t type, magma_int_t m,
const double *dW, magma_int_t lddw,
double *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
dlascl_2x2_lower <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
else {
dlascl_2x2_upper <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
}
/**
    @see magmablas_dlascl_2x2_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlascl_2x2(
magma_type_t type, magma_int_t m,
double *dW, magma_int_t lddw,
double *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_dlascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
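/*
    Example call (illustrative sketch only; dW, lddw, dA and ldda are assumed
    to be device data and strides already set up by the caller):

        magma_int_t info;
        magmablas_dlascl_2x2( MagmaLower, m, dW, lddw, dA, ldda, &info );
*/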
|
b3101c6b0653f7ddac9fec64795d1f0d4deae7fb.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "anaglyph.hpp"
#include "backend/common/imageOps.hpp"
#include "cuda/util.hpp"
#include "cuda/error.hpp"
#include <hip/hip_runtime.h>
namespace VideoStitch {
using namespace Image;
namespace Output {
// http://www.site.uottawa.ca/~edubois/anaglyph/LeastSquaresHowToPhotoshop.pdf
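// The integer coefficients below are the Dubois least-squares mixing matrices
// scaled by 1024; sums are accumulated in 32-bit fixed point and shifted back
// by 10 bits before clamping to 8-bit channels.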
__global__ void anaglyphColorLeftKernel(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const uint32_t val = src[y * width + x];
const int32_t r = clamp8(RGBA::r(val));
const int32_t g = clamp8(RGBA::g(val));
const int32_t b = clamp8(RGBA::b(val));
const uint32_t orig = dst[y * width + x];
const int32_t ar = (RGBA::r(orig) << 10) + 447 * r + 460 * g + 168 * b;
const int32_t ag = (RGBA::g(orig) << 10) - 63 * r - 63 * g - 25 * b;
const int32_t ab = (RGBA::b(orig) << 10) - 49 * r - 51 * g - 17 * b;
dst[y * width + x] =
RGBA::pack(clamp8(ar >> 10), clamp8(ag > 0 ? ag >> 10 : 0), clamp8(ab > 0 ? ab >> 10 : 0), 0xff);
}
}
Status anaglyphColorLeft(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
const dim3 dimBlock2D(16, 16, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
hipLaunchKernelGGL(( anaglyphColorLeftKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, dst, src, height, width);
return CUDA_STATUS;
}
__global__ void anaglyphColorRightKernel(uint32_t* dst, const uint32_t* src, const int64_t height,
const int64_t width) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const uint32_t val = src[y * width + x];
const int32_t r = clamp8(RGBA::r(val));
const int32_t g = clamp8(RGBA::g(val));
const int32_t b = clamp8(RGBA::b(val));
const uint32_t orig = dst[y * width + x];
const int32_t ar = (RGBA::r(orig) << 10) - 11 * r - 33 * g - 7 * b;
const int32_t ag = (RGBA::g(orig) << 10) + 386 * r + 779 * g + 9 * b;
const int32_t ab = (RGBA::b(orig) << 10) - 27 * r - 95 * g + 1264 * b;
dst[y * width + x] =
RGBA::pack(clamp8(ar > 0 ? ar >> 10 : 0), clamp8(ag >> 10), clamp8(ab > 0 ? ab >> 10 : 0), 0xff);
}
}
Status anaglyphColorRight(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
const dim3 dimBlock2D(16, 16, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
hipLaunchKernelGGL(( anaglyphColorRightKernel), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, dst, src, height, width);
return CUDA_STATUS;
}
} // namespace Output
} // namespace VideoStitch
| b3101c6b0653f7ddac9fec64795d1f0d4deae7fb.cu | // Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "anaglyph.hpp"
#include "backend/common/imageOps.hpp"
#include "cuda/util.hpp"
#include "cuda/error.hpp"
#include <cuda_runtime.h>
namespace VideoStitch {
using namespace Image;
namespace Output {
// http://www.site.uottawa.ca/~edubois/anaglyph/LeastSquaresHowToPhotoshop.pdf
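// The integer coefficients below are the Dubois least-squares mixing matrices
// scaled by 1024; sums are accumulated in 32-bit fixed point and shifted back
// by 10 bits before clamping to 8-bit channels.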
__global__ void anaglyphColorLeftKernel(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const uint32_t val = src[y * width + x];
const int32_t r = clamp8(RGBA::r(val));
const int32_t g = clamp8(RGBA::g(val));
const int32_t b = clamp8(RGBA::b(val));
const uint32_t orig = dst[y * width + x];
const int32_t ar = (RGBA::r(orig) << 10) + 447 * r + 460 * g + 168 * b;
const int32_t ag = (RGBA::g(orig) << 10) - 63 * r - 63 * g - 25 * b;
const int32_t ab = (RGBA::b(orig) << 10) - 49 * r - 51 * g - 17 * b;
dst[y * width + x] =
RGBA::pack(clamp8(ar >> 10), clamp8(ag > 0 ? ag >> 10 : 0), clamp8(ab > 0 ? ab >> 10 : 0), 0xff);
}
}
Status anaglyphColorLeft(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
const dim3 dimBlock2D(16, 16, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
anaglyphColorLeftKernel<<<dimGrid2D, dimBlock2D, 0, 0>>>(dst, src, height, width);
return CUDA_STATUS;
}
__global__ void anaglyphColorRightKernel(uint32_t* dst, const uint32_t* src, const int64_t height,
const int64_t width) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const uint32_t val = src[y * width + x];
const int32_t r = clamp8(RGBA::r(val));
const int32_t g = clamp8(RGBA::g(val));
const int32_t b = clamp8(RGBA::b(val));
const uint32_t orig = dst[y * width + x];
const int32_t ar = (RGBA::r(orig) << 10) - 11 * r - 33 * g - 7 * b;
const int32_t ag = (RGBA::g(orig) << 10) + 386 * r + 779 * g + 9 * b;
const int32_t ab = (RGBA::b(orig) << 10) - 27 * r - 95 * g + 1264 * b;
dst[y * width + x] =
RGBA::pack(clamp8(ar > 0 ? ar >> 10 : 0), clamp8(ag >> 10), clamp8(ab > 0 ? ab >> 10 : 0), 0xff);
}
}
Status anaglyphColorRight(uint32_t* dst, const uint32_t* src, const int64_t height, const int64_t width) {
const dim3 dimBlock2D(16, 16, 1);
const dim3 dimGrid2D((unsigned)Cuda::ceilDiv(width, dimBlock2D.x), (unsigned)Cuda::ceilDiv(height, dimBlock2D.y), 1);
anaglyphColorRightKernel<<<dimGrid2D, dimBlock2D, 0, 0>>>(dst, src, height, width);
return CUDA_STATUS;
}
} // namespace Output
} // namespace VideoStitch
|
f3aa10e070054123c6b9cf9b4db7f1fe07162302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// If no MPI, then this whole file is stubbed out
#if USE_MPI
#include <mpi.h>
#include <string.h>
#endif
#include "lulesh.h"
#if USE_MPI
/* Comm Routines */
#define ALLOW_UNPACKED_PLANE false
#define ALLOW_UNPACKED_ROW false
#define ALLOW_UNPACKED_COL false
/*
There are coherence issues for packing and unpacking message
buffers. Ideally, you would like a lot of threads to
   cooperate in the assembly/disassembly of each message.
To do that, each thread should really be operating in a
different coherence zone.
Let's assume we have three fields, f1 through f3, defined on
a 61x61x61 cube. If we want to send the block boundary
information for each field to each neighbor processor across
each cube face, then we have three cases for the
memory layout/coherence of data on each of the six cube
boundaries:
(a) Two of the faces will be in contiguous memory blocks
(b) Two of the faces will be comprised of pencils of
contiguous memory.
(c) Two of the faces will have large strides between
every value living on the face.
How do you pack and unpack this data in buffers to
simultaneous achieve the best memory efficiency and
the most thread independence?
Do do you pack field f1 through f3 tighly to reduce message
size? Do you align each field on a cache coherence boundary
within the message so that threads can pack and unpack each
field independently? For case (b), do you align each
boundary pencil of each field separately? This increases
the message size, but could improve cache coherence so
each pencil could be processed independently by a separate
thread with no conflicts.
Also, memory access for case (c) would best be done without
going through the cache (the stride is so large it just causes
a lot of useless cache evictions). Is it worth creating
a special case version of the packing algorithm that uses
non-coherent load/store opcodes?
*/
/******************************************/
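/*
   Face pack/unpack kernels: the template parameter selects one of the six
   faces of the local brick (0/1 = z-min/z-max, contiguous; 2/3 = y-min/y-max,
   pencils of length dx; 4/5 = x-min/x-max, stride dx).  Each thread copies a
   single value; CommSendGpu below packs the fields for one neighbor
   back-to-back (sendCount values per field) into a single buffer so that one
   MPI message carries all fields for that face.
*/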
template<int type>
__global__ void SendPlane(Real_t *destAddr, Real_t_x *srcAddr, Index_t sendCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= sendCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] = srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[i] = srcAddr[dx*dy*(dz - 1) + i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx+j] = srcAddr[i*dx*dy + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[i*dx+j] = srcAddr[dx*(dy - 1) + i*dx*dy + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dy + j] = srcAddr[i*dx*dy + j*dx] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[i*dy + j] = srcAddr[dx - 1 + i*dx*dy + j*dx] ;
break;
}
}
template<int type>
__global__ void AddPlane(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= recvCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] += srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[dx*dy*(dz - 1) + i] += srcAddr[i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx*dy + j] += srcAddr[i*dx + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[dx*(dy - 1) + i*dx*dy + j] += srcAddr[i*dx + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dx*dy + j*dx] += srcAddr[i*dy + j] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[dx - 1 + i*dx*dy + j*dx] += srcAddr[i*dy + j] ;
break;
}
}
template<int type>
__global__ void CopyPlane(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= recvCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] = srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[dx*dy*(dz - 1) + i] = srcAddr[i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx*dy + j] = srcAddr[i*dx + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[dx*(dy - 1) + i*dx*dy + j] = srcAddr[i*dx + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dx*dy + j*dx] = srcAddr[i*dy + j] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[dx - 1 + i*dx*dy + j*dx] = srcAddr[i*dy + j] ;
break;
}
}
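/*
   Edge pack/unpack kernels: the template parameter selects one of the twelve
   edges of the local brick; each thread copies one nodal value along that
   edge, with the index expression fixing the other two coordinates.
*/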
template<int type>
__global__ void SendEdge(Real_t *destAddr, Real_t_x *srcAddr, Index_t sendCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= sendCount) return;
switch (type) {
case 0:
destAddr[i] = srcAddr[i*dx*dy] ;
break;
case 1:
destAddr[i] = srcAddr[i] ;
break;
case 2:
destAddr[i] = srcAddr[i*dx] ;
break;
case 3:
destAddr[i] = srcAddr[dx*dy - 1 + i*dx*dy] ;
break;
case 4:
destAddr[i] = srcAddr[dx*(dy-1) + dx*dy*(dz-1) + i] ;
break;
case 5:
destAddr[i] = srcAddr[dx*dy*(dz-1) + dx - 1 + i*dx] ;
break;
case 6:
destAddr[i] = srcAddr[dx*(dy-1) + i*dx*dy] ;
break;
case 7:
destAddr[i] = srcAddr[dx*dy*(dz-1) + i] ;
break;
case 8:
destAddr[i] = srcAddr[dx*dy*(dz-1) + i*dx] ;
break;
case 9:
destAddr[i] = srcAddr[dx - 1 + i*dx*dy] ;
break;
case 10:
destAddr[i] = srcAddr[dx*(dy - 1) + i] ;
break;
case 11:
destAddr[i] = srcAddr[dx - 1 + i*dx] ;
break;
}
}
template<int type>
__global__ void AddEdge(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= recvCount) return;
switch (type) {
case 0:
destAddr[i*dx*dy] += srcAddr[i] ;
break;
case 1:
destAddr[i] += srcAddr[i] ;
break;
case 2:
destAddr[i*dx] += srcAddr[i] ;
break;
case 3:
destAddr[dx*dy - 1 + i*dx*dy] += srcAddr[i] ;
break;
case 4:
destAddr[dx*(dy-1) + dx*dy*(dz-1) + i] += srcAddr[i] ;
break;
case 5:
destAddr[dx*dy*(dz-1) + dx - 1 + i*dx] += srcAddr[i] ;
break;
case 6:
destAddr[dx*(dy-1) + i*dx*dy] += srcAddr[i] ;
break;
case 7:
destAddr[dx*dy*(dz-1) + i] += srcAddr[i] ;
break;
case 8:
destAddr[dx*dy*(dz-1) + i*dx] += srcAddr[i] ;
break;
case 9:
destAddr[dx - 1 + i*dx*dy] += srcAddr[i] ;
break;
case 10:
destAddr[dx*(dy - 1) + i] += srcAddr[i] ;
break;
case 11:
destAddr[dx - 1 + i*dx] += srcAddr[i] ;
break;
}
}
template<int type>
__global__ void CopyEdge(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= recvCount) return;
switch (type) {
case 0:
destAddr[i*dx*dy] = srcAddr[i] ;
break;
case 1:
destAddr[i] = srcAddr[i] ;
break;
case 2:
destAddr[i*dx] = srcAddr[i] ;
break;
case 3:
destAddr[dx*dy - 1 + i*dx*dy] = srcAddr[i] ;
break;
case 4:
destAddr[dx*(dy-1) + dx*dy*(dz-1) + i] = srcAddr[i] ;
break;
case 5:
destAddr[dx*dy*(dz-1) + dx - 1 + i*dx] = srcAddr[i] ;
break;
case 6:
destAddr[dx*(dy-1) + i*dx*dy] = srcAddr[i] ;
break;
case 7:
destAddr[dx*dy*(dz-1) + i] = srcAddr[i] ;
break;
case 8:
destAddr[dx*dy*(dz-1) + i*dx] = srcAddr[i] ;
break;
case 9:
destAddr[dx - 1 + i*dx*dy] = srcAddr[i] ;
break;
case 10:
destAddr[dx*(dy - 1) + i] = srcAddr[i] ;
break;
case 11:
destAddr[dx - 1 + i*dx] = srcAddr[i] ;
break;
}
}
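/* Corner exchange: a single value per field, applied on the device by a
   one-thread kernel launch. */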
__global__ void AddCorner(Real_t_x *destAddr, Real_t src)
{
destAddr[0] += src;
}
__global__ void CopyCorner(Real_t_x *destAddr, Real_t src)
{
destAddr[0] = src;
}
/******************************************/
void CommSendGpu(Domain& domain, int msgType,
Index_t xferFields, Domain_member *fieldData,
Index_t dx, Index_t dy, Index_t dz, bool doSend, bool planeOnly, hipStream_t stream)
{
if (domain.numRanks() == 1)
return ;
   /* post receive buffers for all incoming messages */
int myRank ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
MPI_Datatype baseType = ((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE) ;
MPI_Status status[26] ;
Real_t *destAddr ;
Real_t *d_destAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
for (Index_t i=0; i<26; ++i) {
domain.sendRequest[i] = MPI_REQUEST_NULL ;
}
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
// setup launch grid
const int block = 128;
/* post sends */
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dx * dy ;
if (planeMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<0>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - domain.tp()*domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (planeMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<1>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + domain.tp()*domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dx * dz ;
if (rowMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<2>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (rowMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<3>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dy * dz ;
if (colMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<4>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - 1, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (colMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendPlane<5>), dim3((sendCount+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + 1, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (!planeOnly) {
if (rowMin && colMin) {
int toRank = myRank - domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<0>), dim3((dz+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<1>), dim3((dx+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMin && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<2>), dim3((dy+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && colMax && doSend) {
int toRank = myRank + domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<3>), dim3((dz+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<4>), dim3((dx+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMax && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<5>), dim3((dy+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && colMin && doSend) {
int toRank = myRank + domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<6>), dim3((dz+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<7>), dim3((dx+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMin && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<8>), dim3((dy+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && colMax) {
int toRank = myRank - domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<9>), dim3((dz+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<10>), dim3((dx+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMax && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
hipLaunchKernelGGL(( SendEdge<11>), dim3((dy+block-1)/block),dim3(block),0,stream, d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
hipMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && colMin && planeMin) {
/* corner at domain logical coord (0, 0, 0) */
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(0), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMin && planeMax && doSend) {
/* corner at domain logical coord (0, 0, 1) */
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMax && planeMin) {
/* corner at domain logical coord (1, 0, 0) */
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMax && planeMax && doSend) {
/* corner at domain logical coord (1, 0, 1) */
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMin && planeMin) {
/* corner at domain logical coord (0, 1, 0) */
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMin && planeMax && doSend) {
/* corner at domain logical coord (0, 1, 1) */
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMax && planeMin) {
/* corner at domain logical coord (1, 1, 0) */
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMax && planeMax && doSend) {
/* corner at domain logical coord (1, 1, 1) */
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
}
MPI_Waitall(26, domain.sendRequest, status) ;
}
/******************************************/
void CommSBNGpu(Domain& domain, int xferFields, Domain_member *fieldData, hipStream_t *streams) {
if (domain.numRanks() == 1)
return ;
/* summation order should be from smallest value to largest */
/* or we could try out Kahan summation! */
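/* Illustrative sketch (added for clarity, not in the original source): a
   compensated (Kahan) accumulation of the received contributions for one
   node could look roughly like the commented code below; the names
   nodeVal, contrib and nContrib are hypothetical.

      Real_t sum = nodeVal ;
      Real_t c   = Real_t(0.) ;
      for (Index_t k = 0 ; k < nContrib ; ++k) {
         Real_t y = contrib[k] - c ;   // apply running compensation
         Real_t t = sum + y ;          // low-order bits of y may be lost here
         c = (t - sum) - y ;           // recover the lost low-order bits
         sum = t ;
      }
      nodeVal = sum ;
*/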
int myRank ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
Index_t dx = domain.sizeX + 1 ;
Index_t dy = domain.sizeY + 1 ;
Index_t dz = domain.sizeZ + 1 ;
MPI_Status status ;
Real_t *srcAddr ;
Real_t *d_srcAddr ;
Index_t rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = 1 ;
if (domain.rowLoc() == 0) {
rowMin = 0 ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = 0 ;
}
if (domain.colLoc() == 0) {
colMin = 0 ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = 0 ;
}
if (domain.planeLoc() == 0) {
planeMin = 0 ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = 0 ;
}
// setup launch grid
const int block = 128;
// streams
int s = 0;
hipStream_t stream;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<0>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<1>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<2>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<3>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<4>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddPlane<5>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<0>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<1>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<2>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<3>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<4>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<5>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<6>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<7>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<8>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<9>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<10>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( AddEdge<11>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMin & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(0), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( AddCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
/******************************************/
void CommSyncPosVelGpu(Domain& domain, hipStream_t *streams) {
if (domain.numRanks() == 1)
return ;
int myRank ;
bool doRecv = false ;
Index_t xferFields = 6 ; /* x, y, z, xd, yd, zd */
Domain_member fieldData[6] ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
Index_t dx = domain.sizeX + 1 ;
Index_t dy = domain.sizeY + 1 ;
Index_t dz = domain.sizeZ + 1 ;
MPI_Status status ;
Real_t *srcAddr ;
Real_t *d_srcAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
// setup launch grid
const int block = 128;
// streams
int s = 0;
hipStream_t stream;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<0>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<1>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<2>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<3>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<4>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyPlane<5>), dim3((opCount+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin && colMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<0>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<1>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<2>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<3>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<4>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<5>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<6>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<7>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<8>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin && colMax && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<9>), dim3((dz+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<10>), dim3((dx+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
hipMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), hipMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipLaunchKernelGGL(( CopyEdge<11>), dim3((dy+block-1)/block),dim3(block),0,stream, d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMin & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(0), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
hipLaunchKernelGGL(( CopyCorner), dim3(1),dim3(1),0,stream, &(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
/******************************************/
void CommMonoQGpu(Domain& domain, hipStream_t stream)
{
if (domain.numRanks() == 1)
return ;
int myRank ;
Index_t xferFields = 3 ; /* delv_xi, delv_eta, delv_zeta */
Domain_member fieldData[3] ;
Index_t fieldOffset[3] ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t dx = domain.sizeX ;
Index_t dy = domain.sizeY ;
Index_t dz = domain.sizeZ ;
MPI_Status status ;
Real_t *srcAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
/* point into ghost data area */
// fieldData[0] = &(domain.delv_xi(domain.numElem())) ;
// fieldData[1] = &(domain.delv_eta(domain.numElem())) ;
// fieldData[2] = &(domain.delv_zeta(domain.numElem())) ;
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
fieldOffset[0] = domain.numElem ;
fieldOffset[1] = domain.numElem ;
fieldOffset[2] = domain.numElem ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
hipMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), hipMemcpyHostToDevice, stream);
srcAddr += opCount ;
}
++pmsg ;
}
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
#endif
| f3aa10e070054123c6b9cf9b4db7f1fe07162302.cu |
// If no MPI, then this whole file is stubbed out
#if USE_MPI
#include <mpi.h>
#include <string.h>
#endif
#include "lulesh.h"
#if USE_MPI
/* Comm Routines */
#define ALLOW_UNPACKED_PLANE false
#define ALLOW_UNPACKED_ROW false
#define ALLOW_UNPACKED_COL false
/*
There are coherence issues for packing and unpacking message
buffers. Ideally, you would like a lot of threads to
cooperate in the assembly/disassembly of each message.
To do that, each thread should really be operating in a
different coherence zone.
Let's assume we have three fields, f1 through f3, defined on
a 61x61x61 cube. If we want to send the block boundary
information for each field to each neighbor processor across
each cube face, then we have three cases for the
memory layout/coherence of data on each of the six cube
boundaries:
(a) Two of the faces will be in contiguous memory blocks
(b) Two of the faces will be comprised of pencils of
contiguous memory.
(c) Two of the faces will have large strides between
every value living on the face.
How do you pack and unpack this data in buffers to
simultaneously achieve the best memory efficiency and
the most thread independence?
Do you pack fields f1 through f3 tightly to reduce message
size? Do you align each field on a cache coherence boundary
within the message so that threads can pack and unpack each
field independently? For case (b), do you align each
boundary pencil of each field separately? This increases
the message size, but could improve cache coherence so
each pencil could be processed independently by a separate
thread with no conflicts.
Also, memory access for case (c) would best be done without
going through the cache (the stride is so large it just causes
a lot of useless cache evictions). Is it worth creating
a special case version of the packing algorithm that uses
non-coherent load/store opcodes?
*/
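/* Illustrative helper (added for clarity; not part of the original LULESH
   source and not referenced by the kernels below): it maps a packed buffer
   index onto the corresponding node index for the six face types handled
   by SendPlane<>, mirroring the switch statements that follow.  For a
   61x61x61 cube this makes the three cases above concrete: types 0/1 are
   one contiguous run of dx*dy values, types 2/3 are dz pencils of dx
   contiguous values with a stride of dx*dy between pencils, and types 4/5
   are dy*dz isolated values each separated by a stride of dx. */
static inline Index_t FacePackIndexSketch(int type, Index_t tid,
                                          Index_t dx, Index_t dy, Index_t dz)
{
   switch (type) {
   case 0:  return tid ;                                         /* planeMin: contiguous */
   case 1:  return dx*dy*(dz - 1) + tid ;                        /* planeMax: contiguous */
   case 2:  return (tid / dx)*dx*dy + (tid % dx) ;               /* rowMin: pencils      */
   case 3:  return dx*(dy - 1) + (tid / dx)*dx*dy + (tid % dx) ; /* rowMax: pencils      */
   case 4:  return (tid / dy)*dx*dy + (tid % dy)*dx ;            /* colMin: strided      */
   default: return dx - 1 + (tid / dy)*dx*dy + (tid % dy)*dx ;   /* colMax: strided      */
   }
}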
/******************************************/
template<int type>
__global__ void SendPlane(Real_t *destAddr, Real_t_x *srcAddr, Index_t sendCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= sendCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] = srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[i] = srcAddr[dx*dy*(dz - 1) + i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx+j] = srcAddr[i*dx*dy + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[i*dx+j] = srcAddr[dx*(dy - 1) + i*dx*dy + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dy + j] = srcAddr[i*dx*dy + j*dx] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[i*dy + j] = srcAddr[dx - 1 + i*dx*dy + j*dx] ;
break;
}
}
template<int type>
__global__ void AddPlane(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= recvCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] += srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[dx*dy*(dz - 1) + i] += srcAddr[i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx*dy + j] += srcAddr[i*dx + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[dx*(dy - 1) + i*dx*dy + j] += srcAddr[i*dx + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dx*dy + j*dx] += srcAddr[i*dy + j] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[dx - 1 + i*dx*dy + j*dx] += srcAddr[i*dy + j] ;
break;
}
}
template<int type>
__global__ void CopyPlane(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= recvCount) return;
int i, j;
switch (type) {
case 0:
i = tid;
destAddr[i] = srcAddr[i] ;
break;
case 1:
i = tid;
destAddr[dx*dy*(dz - 1) + i] = srcAddr[i] ;
break;
case 2:
i = tid / dx;
j = tid % dx;
destAddr[i*dx*dy + j] = srcAddr[i*dx + j] ;
break;
case 3:
i = tid / dx;
j = tid % dx;
destAddr[dx*(dy - 1) + i*dx*dy + j] = srcAddr[i*dx + j] ;
break;
case 4:
i = tid / dy;
j = tid % dy;
destAddr[i*dx*dy + j*dx] = srcAddr[i*dy + j] ;
break;
case 5:
i = tid / dy;
j = tid % dy;
destAddr[dx - 1 + i*dx*dy + j*dx] = srcAddr[i*dy + j] ;
break;
}
}
template<int type>
__global__ void SendEdge(Real_t *destAddr, Real_t_x *srcAddr, Index_t sendCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= sendCount) return;
switch (type) {
case 0:
destAddr[i] = srcAddr[i*dx*dy] ;
break;
case 1:
destAddr[i] = srcAddr[i] ;
break;
case 2:
destAddr[i] = srcAddr[i*dx] ;
break;
case 3:
destAddr[i] = srcAddr[dx*dy - 1 + i*dx*dy] ;
break;
case 4:
destAddr[i] = srcAddr[dx*(dy-1) + dx*dy*(dz-1) + i] ;
break;
case 5:
destAddr[i] = srcAddr[dx*dy*(dz-1) + dx - 1 + i*dx] ;
break;
case 6:
destAddr[i] = srcAddr[dx*(dy-1) + i*dx*dy] ;
break;
case 7:
destAddr[i] = srcAddr[dx*dy*(dz-1) + i] ;
break;
case 8:
destAddr[i] = srcAddr[dx*dy*(dz-1) + i*dx] ;
break;
case 9:
destAddr[i] = srcAddr[dx - 1 + i*dx*dy] ;
break;
case 10:
destAddr[i] = srcAddr[dx*(dy - 1) + i] ;
break;
case 11:
destAddr[i] = srcAddr[dx - 1 + i*dx] ;
break;
}
}
template<int type>
__global__ void AddEdge(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= recvCount) return;
switch (type) {
case 0:
destAddr[i*dx*dy] += srcAddr[i] ;
break;
case 1:
destAddr[i] += srcAddr[i] ;
break;
case 2:
destAddr[i*dx] += srcAddr[i] ;
break;
case 3:
destAddr[dx*dy - 1 + i*dx*dy] += srcAddr[i] ;
break;
case 4:
destAddr[dx*(dy-1) + dx*dy*(dz-1) + i] += srcAddr[i] ;
break;
case 5:
destAddr[dx*dy*(dz-1) + dx - 1 + i*dx] += srcAddr[i] ;
break;
case 6:
destAddr[dx*(dy-1) + i*dx*dy] += srcAddr[i] ;
break;
case 7:
destAddr[dx*dy*(dz-1) + i] += srcAddr[i] ;
break;
case 8:
destAddr[dx*dy*(dz-1) + i*dx] += srcAddr[i] ;
break;
case 9:
destAddr[dx - 1 + i*dx*dy] += srcAddr[i] ;
break;
case 10:
destAddr[dx*(dy - 1) + i] += srcAddr[i] ;
break;
case 11:
destAddr[dx - 1 + i*dx] += srcAddr[i] ;
break;
}
}
template<int type>
__global__ void CopyEdge(Real_t *srcAddr, Real_t_x *destAddr, Index_t recvCount, Index_t dx, Index_t dy, Index_t dz)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= recvCount) return;
switch (type) {
case 0:
destAddr[i*dx*dy] = srcAddr[i] ;
break;
case 1:
destAddr[i] = srcAddr[i] ;
break;
case 2:
destAddr[i*dx] = srcAddr[i] ;
break;
case 3:
destAddr[dx*dy - 1 + i*dx*dy] = srcAddr[i] ;
break;
case 4:
destAddr[dx*(dy-1) + dx*dy*(dz-1) + i] = srcAddr[i] ;
break;
case 5:
destAddr[dx*dy*(dz-1) + dx - 1 + i*dx] = srcAddr[i] ;
break;
case 6:
destAddr[dx*(dy-1) + i*dx*dy] = srcAddr[i] ;
break;
case 7:
destAddr[dx*dy*(dz-1) + i] = srcAddr[i] ;
break;
case 8:
destAddr[dx*dy*(dz-1) + i*dx] = srcAddr[i] ;
break;
case 9:
destAddr[dx - 1 + i*dx*dy] = srcAddr[i] ;
break;
case 10:
destAddr[dx*(dy - 1) + i] = srcAddr[i] ;
break;
case 11:
destAddr[dx - 1 + i*dx] = srcAddr[i] ;
break;
}
}
__global__ void AddCorner(Real_t_x *destAddr, Real_t src)
{
destAddr[0] += src;
}
__global__ void CopyCorner(Real_t_x *destAddr, Real_t src)
{
destAddr[0] = src;
}
/******************************************/
void CommSendGpu(Domain& domain, int msgType,
Index_t xferFields, Domain_member *fieldData,
Index_t dx, Index_t dy, Index_t dz, bool doSend, bool planeOnly, cudaStream_t stream)
{
if (domain.numRanks() == 1)
return ;
/* pack and post sends for all outgoing messages */
int myRank ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
MPI_Datatype baseType = ((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE) ;
MPI_Status status[26] ;
Real_t *destAddr ;
Real_t *d_destAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
for (Index_t i=0; i<26; ++i) {
domain.sendRequest[i] = MPI_REQUEST_NULL ;
}
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
// setup launch grid
const int block = 128;
/* post sends */
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dx * dy ;
if (planeMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<0><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - domain.tp()*domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (planeMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<1><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + domain.tp()*domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dx * dz ;
if (rowMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<2><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (rowMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<3><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + domain.tp(), msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
int sendCount = dy * dz ;
if (colMin) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<4><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank - 1, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
if (colMax && doSend) {
destAddr = &domain.commDataSend[pmsg * maxPlaneComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendPlane<5><<<(sendCount+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), sendCount, dx, dy, dz);
d_destAddr += sendCount ;
}
d_destAddr -= xferFields*sendCount ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*sendCount*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*sendCount, baseType,
myRank + 1, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg]) ;
++pmsg ;
}
}
if (!planeOnly) {
if (rowMin && colMin) {
int toRank = myRank - domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<0><<<(dz+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<1><<<(dx+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMin && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<2><<<(dy+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && colMax && doSend) {
int toRank = myRank + domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<3><<<(dz+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<4><<<(dx+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMax && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<5><<<(dy+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && colMin && doSend) {
int toRank = myRank + domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<6><<<(dz+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<7><<<(dx+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMin && planeMax && doSend) {
int toRank = myRank + domain.tp()*domain.tp() - 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<8><<<(dy+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && colMax) {
int toRank = myRank - domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<9><<<(dz+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dz, dx, dy, dz);
d_destAddr += dz ;
}
d_destAddr -= xferFields*dz ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dz, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMax && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<10><<<(dx+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dx, dx, dy, dz);
d_destAddr += dx ;
}
d_destAddr -= xferFields*dx ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dx, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (colMax && planeMin) {
int toRank = myRank - domain.tp()*domain.tp() + 1 ;
destAddr = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_destAddr = &domain.d_commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
Domain_member src = fieldData[fi] ;
SendEdge<11><<<(dy+block-1)/block,block,0,stream>>>(d_destAddr, &(domain.*src)(0), dy, dx, dy, dz);
d_destAddr += dy ;
}
d_destAddr -= xferFields*dy ;
cudaMemcpyAsync(destAddr, d_destAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
MPI_Isend(destAddr, xferFields*dy, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg]) ;
++emsg ;
}
if (rowMin && colMin && planeMin) {
/* corner at domain logical coord (0, 0, 0) */
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(0), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMin && planeMax && doSend) {
/* corner at domain logical coord (0, 0, 1) */
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMax && planeMin) {
/* corner at domain logical coord (1, 0, 0) */
int toRank = myRank - domain.tp()*domain.tp() - domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMin && colMax && planeMax && doSend) {
/* corner at domain logical coord (1, 0, 1) */
int toRank = myRank + domain.tp()*domain.tp() - domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMin && planeMin) {
/* corner at domain logical coord (0, 1, 0) */
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMin && planeMax && doSend) {
/* corner at domain logical coord (0, 1, 1) */
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() - 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMax && planeMin) {
/* corner at domain logical coord (1, 1, 0) */
int toRank = myRank - domain.tp()*domain.tp() + domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
if (rowMax && colMax && planeMax && doSend) {
/* corner at domain logical coord (1, 1, 1) */
int toRank = myRank + domain.tp()*domain.tp() + domain.tp() + 1 ;
Real_t *comBuf = &domain.commDataSend[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
for (Index_t fi=0; fi<xferFields; ++fi) {
cudaMemcpyAsync(&comBuf[fi], &(domain.*fieldData[fi])(idx), sizeof(Real_t), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
MPI_Isend(comBuf, xferFields, baseType, toRank, msgType,
MPI_COMM_WORLD, &domain.sendRequest[pmsg+emsg+cmsg]) ;
++cmsg ;
}
}
MPI_Waitall(26, domain.sendRequest, status) ;
}
/******************************************/
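// CommSBNGpu waits for each incoming boundary message, copies it to the device, and
// accumulates it into the nodal fields via AddPlane/AddEdge/AddCorner, using a
// separate stream per message so copies and kernels from different neighbors overlap.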
void CommSBNGpu(Domain& domain, int xferFields, Domain_member *fieldData, cudaStream_t *streams) {
if (domain.numRanks() == 1)
return ;
/* summation order should be from smallest value to largest */
/* or we could try out kahan summation! */
int myRank ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
Index_t dx = domain.sizeX + 1 ;
Index_t dy = domain.sizeY + 1 ;
Index_t dz = domain.sizeZ + 1 ;
MPI_Status status ;
Real_t *srcAddr ;
Real_t *d_srcAddr ;
Index_t rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = 1 ;
if (domain.rowLoc() == 0) {
rowMin = 0 ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = 0 ;
}
if (domain.colLoc() == 0) {
colMin = 0 ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = 0 ;
}
if (domain.planeLoc() == 0) {
planeMin = 0 ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = 0 ;
}
// setup launch grid
const int block = 128;
// streams
int s = 0;
cudaStream_t stream;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<0><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<1><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<2><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<3><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<4><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddPlane<5><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<0><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<1><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<2><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<3><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<4><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<5><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<6><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<7><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<8><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<9><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<10><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
AddEdge<11><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMin & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(0), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMin) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
AddCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
/******************************************/
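// CommSyncPosVelGpu overwrites ghost positions and velocities (x, y, z, xd, yd, zd)
// with the values received from neighboring ranks, using the Copy* kernels above.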
void CommSyncPosVelGpu(Domain& domain, cudaStream_t *streams) {
if (domain.numRanks() == 1)
return ;
int myRank ;
bool doRecv = false ;
Index_t xferFields = 6 ; /* x, y, z, xd, yd, zd */
Domain_member fieldData[6] ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t maxEdgeComm = xferFields * domain.maxEdgeSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t emsg = 0 ; /* edge comm msg */
Index_t cmsg = 0 ; /* corner comm msg */
Index_t dx = domain.sizeX + 1 ;
Index_t dy = domain.sizeY + 1 ;
Index_t dz = domain.sizeZ + 1 ;
MPI_Status status ;
Real_t *srcAddr ;
Real_t *d_srcAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
// setup launch grid
const int block = 128;
// streams
int s = 0;
cudaStream_t stream;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<0><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<1><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<2><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<3><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin && doRecv) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<4><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyPlane<5><<<(opCount+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), opCount, dx, dy, dz);
d_srcAddr += opCount ;
}
++pmsg ;
}
}
if (rowMin && colMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<0><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<1><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<2><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<3><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<4><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<5><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMax & colMin) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<6><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<7><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMin & planeMax) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<8><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin && colMax && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dz*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<9><<<(dz+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dz, dx, dy, dz);
d_srcAddr += dz ;
}
++emsg ;
}
if (rowMax && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dx*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<10><<<(dx+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dx, dx, dy, dz);
d_srcAddr += dx ;
}
++emsg ;
}
if (colMax && planeMin && doRecv) {
stream = streams[s++];
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
d_srcAddr = &domain.d_commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg], &status) ;
cudaMemcpyAsync(d_srcAddr, srcAddr, xferFields*dy*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
CopyEdge<11><<<(dy+block-1)/block,block,0,stream>>>(d_srcAddr, &(domain.*dest)(0), dy, dx, dy, dz);
d_srcAddr += dy ;
}
++emsg ;
}
if (rowMin & colMin & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(0), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMin & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 0, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + (dx - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMin & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (0, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*(dz - 1) + dx*(dy - 1) ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMin & doRecv) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 0) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
if (rowMax & colMax & planeMax) {
stream = streams[s++];
/* corner at domain logical coord (1, 1, 1) */
Real_t *comBuf = &domain.commDataRecv[pmsg * maxPlaneComm +
emsg * maxEdgeComm +
cmsg * CACHE_COHERENCE_PAD_REAL] ;
Index_t idx = dx*dy*dz - 1 ;
MPI_Wait(&domain.recvRequest[pmsg+emsg+cmsg], &status) ;
for (Index_t fi=0; fi<xferFields; ++fi) {
CopyCorner<<<1,1,0,stream>>>(&(domain.*fieldData[fi])(idx), comBuf[fi]) ;
}
++cmsg ;
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
/******************************************/
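// CommMonoQGpu copies the received element-centered gradients (delv_xi, delv_eta,
// delv_zeta) straight into the ghost slots that begin at offset domain.numElem.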
void CommMonoQGpu(Domain& domain, cudaStream_t stream)
{
if (domain.numRanks() == 1)
return ;
int myRank ;
Index_t xferFields = 3 ; /* delv_xi, delv_eta, delv_zeta */
Domain_member fieldData[3] ;
Index_t fieldOffset[3] ;
Index_t maxPlaneComm = xferFields * domain.maxPlaneSize ;
Index_t pmsg = 0 ; /* plane comm msg */
Index_t dx = domain.sizeX ;
Index_t dy = domain.sizeY ;
Index_t dz = domain.sizeZ ;
MPI_Status status ;
Real_t *srcAddr ;
bool rowMin, rowMax, colMin, colMax, planeMin, planeMax ;
/* assume communication to 6 neighbors by default */
rowMin = rowMax = colMin = colMax = planeMin = planeMax = true ;
if (domain.rowLoc() == 0) {
rowMin = false ;
}
if (domain.rowLoc() == (domain.tp()-1)) {
rowMax = false ;
}
if (domain.colLoc() == 0) {
colMin = false ;
}
if (domain.colLoc() == (domain.tp()-1)) {
colMax = false ;
}
if (domain.planeLoc() == 0) {
planeMin = false ;
}
if (domain.planeLoc() == (domain.tp()-1)) {
planeMax = false ;
}
/* point into ghost data area */
// fieldData[0] = &(domain.delv_xi(domain.numElem())) ;
// fieldData[1] = &(domain.delv_eta(domain.numElem())) ;
// fieldData[2] = &(domain.delv_zeta(domain.numElem())) ;
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
fieldOffset[0] = domain.numElem ;
fieldOffset[1] = domain.numElem ;
fieldOffset[2] = domain.numElem ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
if (planeMin | planeMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dy ;
if (planeMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (planeMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
}
if (rowMin | rowMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dx * dz ;
if (rowMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (rowMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
}
if (colMin | colMax) {
/* ASSUMING ONE DOMAIN PER RANK, CONSTANT BLOCK SIZE HERE */
Index_t opCount = dy * dz ;
if (colMin) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
fieldOffset[fi] += opCount ;
}
++pmsg ;
}
if (colMax) {
/* contiguous memory */
srcAddr = &domain.commDataRecv[pmsg * maxPlaneComm] ;
MPI_Wait(&domain.recvRequest[pmsg], &status) ;
for (Index_t fi=0 ; fi<xferFields; ++fi) {
Domain_member dest = fieldData[fi] ;
cudaMemcpyAsync(&(domain.*dest)(fieldOffset[fi]), srcAddr, opCount*sizeof(Real_t), cudaMemcpyHostToDevice, stream);
srcAddr += opCount ;
}
++pmsg ;
}
}
// don't need to call synchronize since it will be done automatically
// before kernels start to execute in NULL stream
}
#endif
|
c309806cbb25649283fb708c29c2d6b6c864a8cb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
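// 27-point 3D stencil: each thread updates four consecutive j points (the j loop is
// unrolled by 4); the pointer casts assume a 514x514 plane pitch, i.e. N = 514 with
// a one-cell halo around the updated interior.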
__global__ void j3d27pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
  //Determining the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 1;
int i = max(i0,1) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 1;
int j = max(j0,1) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 1;
int k = max(k0,1) + (int)(threadIdx.z);
double (*in)[514][514] = (double (*)[514][514])t_in;
double (*out)[514][514] = (double (*)[514][514])t_out;
  if (i<=N-2 && j<=N-2 && k<=N-2) {
#pragma unroll 4
for (int jj=0; jj<=3; jj++) {
out[k][j+jj][i] = 0.125 * in[k][j+jj][i] +
1.14 * (in[k-1][j+jj][i] + in[k+1][j+jj][i] + in[k][j+jj-1][i] +
in[k][j+jj+1][i] + in[k][j+jj][i-1] + in[k][j+jj][i+1]) +
0.75 * (in[k-1][j+jj-1][i-1] + in[k-1][j+jj-1][i+1] + in[k-1][j+jj+1][i-1] +
in[k-1][j+jj+1][i+1] + in[k+1][j+jj-1][i-1] + in[k+1][j+jj-1][i+1] +
in[k+1][j+jj+1][i-1] + in[k+1][j+jj+1][i+1]) +
1.031 * (in[k-1][j+jj-1][i] + in[k-1][j+jj][i-1] + in[k-1][j+jj][i+1] +
in[k-1][j+jj+1][i] + in[k][j+jj-1][i-1] + in[k][j+jj-1][i+1] +
in[k][j+jj+1][i-1] + in[k][j+jj+1][i+1] + in[k+1][j+jj-1][i] +
in[k+1][j+jj][i-1] + in[k+1][j+jj][i+1] + in[k+1][j+jj+1][i]);
}
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
hipMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
hipMemcpy (in, h_in, sizeof(double)*N*N*N, hipMemcpyHostToDevice);
double *out;
hipMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N-2, blockconfig.x), ceil(N-2, 4*blockconfig.y), ceil(N-2, blockconfig.z));
hipLaunchKernelGGL(( j3d27pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N);
hipMemcpy (h_out, out, sizeof(double)*N*N*N, hipMemcpyDeviceToHost);
hipFree (in);
hipFree (out);
}
| c309806cbb25649283fb708c29c2d6b6c864a8cb.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
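// 27-point 3D stencil: each thread updates four consecutive j points (the j loop is
// unrolled by 4); the pointer casts assume a 514x514 plane pitch, i.e. N = 514 with
// a one-cell halo around the updated interior.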
__global__ void j3d27pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
  //Determining the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 1;
int i = max(i0,1) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 1;
int j = max(j0,1) + 4*(int)(threadIdx.y);
int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 1;
int k = max(k0,1) + (int)(threadIdx.z);
double (*in)[514][514] = (double (*)[514][514])t_in;
double (*out)[514][514] = (double (*)[514][514])t_out;
  if (i<=N-2 && j<=N-2 && k<=N-2) {
#pragma unroll 4
for (int jj=0; jj<=3; jj++) {
out[k][j+jj][i] = 0.125 * in[k][j+jj][i] +
1.14 * (in[k-1][j+jj][i] + in[k+1][j+jj][i] + in[k][j+jj-1][i] +
in[k][j+jj+1][i] + in[k][j+jj][i-1] + in[k][j+jj][i+1]) +
0.75 * (in[k-1][j+jj-1][i-1] + in[k-1][j+jj-1][i+1] + in[k-1][j+jj+1][i-1] +
in[k-1][j+jj+1][i+1] + in[k+1][j+jj-1][i-1] + in[k+1][j+jj-1][i+1] +
in[k+1][j+jj+1][i-1] + in[k+1][j+jj+1][i+1]) +
1.031 * (in[k-1][j+jj-1][i] + in[k-1][j+jj][i-1] + in[k-1][j+jj][i+1] +
in[k-1][j+jj+1][i] + in[k][j+jj-1][i-1] + in[k][j+jj-1][i+1] +
in[k][j+jj+1][i-1] + in[k][j+jj+1][i+1] + in[k+1][j+jj-1][i] +
in[k+1][j+jj][i-1] + in[k+1][j+jj][i+1] + in[k+1][j+jj+1][i]);
}
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N-2, blockconfig.x), ceil(N-2, 4*blockconfig.y), ceil(N-2, blockconfig.z));
j3d27pt<<<gridconfig, blockconfig>>> (in, out, N);
cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (in);
cudaFree (out);
}
|
ab94c54755a63cc5562db1bce646d4b5490e31a3.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <wrapper/util/util.h>
namespace wrapper {
namespace util {
void DeviceInit(CommandLineArgs &args)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No devices supporting CUDA.\n");
exit(1);
}
int dev = 0;
args.GetCmdLineArgument("device", dev);
if (dev < 0) {
dev = 0;
}
if (dev > deviceCount - 1) {
dev = deviceCount - 1;
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
if (!args.CheckCmdLineFlag("quiet")) {
printf("Using device %d: %s\n", dev, deviceProp.name);
}
hipSetDevice(dev);
}
}
}
| ab94c54755a63cc5562db1bce646d4b5490e31a3.cu | #pragma once
#include <wrapper/util/util.h>
namespace wrapper {
namespace util {
void DeviceInit(CommandLineArgs &args)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No devices supporting CUDA.\n");
exit(1);
}
int dev = 0;
args.GetCmdLineArgument("device", dev);
if (dev < 0) {
dev = 0;
}
if (dev > deviceCount - 1) {
dev = deviceCount - 1;
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (deviceProp.major < 1) {
fprintf(stderr, "Device does not support CUDA.\n");
exit(1);
}
if (!args.CheckCmdLineFlag("quiet")) {
printf("Using device %d: %s\n", dev, deviceProp.name);
}
cudaSetDevice(dev);
}
}
}
|
6a74915bea8a98ea90fbc9cc11cba176b8923b1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dot_product.cuh"
#define BLOCKSIZE 1024
__global__ void dotProduct_dVector_kernel(double *a, double *b, double *partial_sum, int n) {
__shared__ double partial_sums[BLOCKSIZE];
double local_sum = 0;
int id = blockIdx.x*blockDim.x + threadIdx.x;
int partial_index = threadIdx.x;
while (id < n) {
local_sum += (a[id] * b[id]);
id += (blockDim.x * gridDim.x); // this thread may have to handle multiple sums
}
partial_sums[partial_index] = local_sum;
__syncthreads();
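	// Tree reduction of the per-thread partial sums: each pass halves the number of
	// active threads, and each surviving thread adds its partner's value. This assumes
	// blockDim.x is a power of two (the kernel is launched with BLOCKSIZE = 1024).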
int sum_level = blockDim.x >> 1; // divide by 2
while (sum_level != 0) {
if (partial_index < sum_level) {
partial_sums[partial_index] += partial_sums[partial_index + sum_level];
}
__syncthreads();
sum_level >>= 1; // divide by 2
}
if (partial_index == 0) {
// if we are the thread processing index 0 of partial_sums for our block
partial_sum[blockIdx.x] = partial_sums[0];
}
	// at this point there are still some partial sums left to compute
// inefficient to do so on GPU. Let CPU do this
}
int dotProduct_dVectors(dVector a, dVector b, double *result) {
if (a.len != b.len) {
fprintf(stderr, "Vector length mismatch\n");
return -1;
}
int err = 0;
double *device_a, *device_b, *device_partial, *host_partial;
int sizeInBytes = a.len * sizeof(double);
hipError_t cudaStatus;
int gridSize = (int)ceil((float)a.len / BLOCKSIZE);
host_partial = (double *)calloc(gridSize, sizeof(double));
cudaStatus = hipMalloc(&device_a, sizeInBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "A: hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc(&device_b, sizeInBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "B: hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc(&device_partial, gridSize * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "device_partial: hipMalloc failed!\n");
goto Error;
}
hipMemcpy(device_a, a.data, sizeInBytes, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "A: hipMemcpy to device failed!\n");
goto Error;
}
hipMemcpy(device_b, b.data, sizeInBytes, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "B: hipMemcpy to device failed!\n");
goto Error;
}
hipLaunchKernelGGL(( dotProduct_dVector_kernel), dim3(gridSize), dim3(BLOCKSIZE) , 0, 0, device_a, device_b, device_partial, a.len);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dotProduct_dVector_kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
hipMemcpy(host_partial, device_partial, gridSize * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "partial: hipMemcpy to host failed!\n");
goto Error;
}
Error:
hipFree(device_a);
hipFree(device_b);
hipFree(device_partial);
if (cudaStatus != hipSuccess) {
err = -1;
goto Exit;
}
double sum = 0;
for (int i = 0; i < gridSize; ++i) {
sum += host_partial[i];
}
*result = sum;
Exit:
free(host_partial);
return err;
}
void test_dotProduct(int n) {
	dVector a, b;
dvector_init(a, n);
dvector_init(b, n);
double dotProduct = 0;
for (int i = 0; i < n; ++i) {
a.data[i] = 1;
b.data[i] = 2;
}
dotProduct_dVectors(a, b, &dotProduct);
//dotProduct should be 2*n
	if (fabs(dotProduct - 2 * n) > 1e-14) {
fprintf(stdout, "Dot Product=%f is not correct within error.\n", dotProduct);
}
else {
fprintf(stdout, "Dot Product=%f is correct within error.\n", dotProduct);
}
dvector_free(a);
dvector_free(b);
} | 6a74915bea8a98ea90fbc9cc11cba176b8923b1b.cu | #include "dot_product.cuh"
#define BLOCKSIZE 1024
__global__ void dotProduct_dVector_kernel(double *a, double *b, double *partial_sum, int n) {
__shared__ double partial_sums[BLOCKSIZE];
double local_sum = 0;
int id = blockIdx.x*blockDim.x + threadIdx.x;
int partial_index = threadIdx.x;
while (id < n) {
local_sum += (a[id] * b[id]);
id += (blockDim.x * gridDim.x); // this thread may have to handle multiple sums
}
partial_sums[partial_index] = local_sum;
__syncthreads();
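	// Tree reduction of the per-thread partial sums: each pass halves the number of
	// active threads, and each surviving thread adds its partner's value. This assumes
	// blockDim.x is a power of two (the kernel is launched with BLOCKSIZE = 1024).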
int sum_level = blockDim.x >> 1; // divide by 2
while (sum_level != 0) {
if (partial_index < sum_level) {
partial_sums[partial_index] += partial_sums[partial_index + sum_level];
}
__syncthreads();
sum_level >>= 1; // divide by 2
}
if (partial_index == 0) {
// if we are the thread processing index 0 of partial_sums for our block
partial_sum[blockIdx.x] = partial_sums[0];
}
	// at this point there are still some partial sums left to compute
// inefficient to do so on GPU. Let CPU do this
}
int dotProduct_dVectors(dVector a, dVector b, double *result) {
if (a.len != b.len) {
fprintf(stderr, "Vector length mismatch\n");
return -1;
}
int err = 0;
double *device_a, *device_b, *device_partial, *host_partial;
int sizeInBytes = a.len * sizeof(double);
cudaError_t cudaStatus;
int gridSize = (int)ceil((float)a.len / BLOCKSIZE);
host_partial = (double *)calloc(gridSize, sizeof(double));
cudaStatus = cudaMalloc(&device_a, sizeInBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "A: cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc(&device_b, sizeInBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "B: cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc(&device_partial, gridSize * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "device_partial: cudaMalloc failed!\n");
goto Error;
}
cudaMemcpy(device_a, a.data, sizeInBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "A: cudaMemcpy to device failed!\n");
goto Error;
}
cudaMemcpy(device_b, b.data, sizeInBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "B: cudaMemcpy to device failed!\n");
goto Error;
}
dotProduct_dVector_kernel<<<gridSize, BLOCKSIZE >>>(device_a, device_b, device_partial, a.len);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dotProduct_dVector_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaMemcpy(host_partial, device_partial, gridSize * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "partial: cudaMemcpy to host failed!\n");
goto Error;
}
Error:
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_partial);
if (cudaStatus != cudaSuccess) {
err = -1;
goto Exit;
}
double sum = 0;
for (int i = 0; i < gridSize; ++i) {
sum += host_partial[i];
}
*result = sum;
Exit:
free(host_partial);
return err;
}
void test_dotProduct(int n) {
	dVector a, b;
dvector_init(a, n);
dvector_init(b, n);
double dotProduct = 0;
for (int i = 0; i < n; ++i) {
a.data[i] = 1;
b.data[i] = 2;
}
dotProduct_dVectors(a, b, &dotProduct);
//dotProduct should be 2*n
	if (fabs(dotProduct - 2 * n) > 1e-14) {
fprintf(stdout, "Dot Product=%f is not correct within error.\n", dotProduct);
}
else {
fprintf(stdout, "Dot Product=%f is correct within error.\n", dotProduct);
}
dvector_free(a);
dvector_free(b);
} |
2553b461e51f135d688bbd93440d35d7723ce6c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector addition on the GPU: C = A + B */
#include <stdio.h>
#include <stdlib.h>
// Max number of threads per block
#define SIZE 1024
// Device function (i.e. kernel)
__global__ void VecAdd(float * A, float * B, float * C, int N)
{
int i = threadIdx.x;
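	// Indexing uses only threadIdx.x, so this kernel assumes a single block with at
	// least N threads (main() launches one block of SIZE threads).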
if ( i < N ) {
C[i] = A[i] + B[i];
}
}
// CPU version of the vector addition function
void vecAddCPU(float * A, float * B, float * C, int N)
{
int i;
for (i=0; i<N; i++)
{
C[i] = A[i] + B[i];
}
}
// Function compares two 1d arrays
void compareVecs( float * vec1, float * vec2, int N )
{
int i;
int vecsEqual = 1;
for (i=0; i<N; i++)
{
    if ( fabs (vec1[i] - vec2[i]) > 0.00001 )
{
printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]);
vecsEqual = 0;
}
}
if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n");
}
/* Host function for filling vector (1d array) with
random numbers between -20.0 and 20.0 */
void fillOutVector( float * vec, int vec_length )
{
time_t t;
srand((unsigned) time(&t)); // initialize random number generator
int i;
for (i=0; i<vec_length; i++)
{
vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0;
vec[i] -= 20.0;
}
}
// Host function for printing a vector (1d array)
void printVector( float * vec, int vec_length )
{
int i;
for (i=0; i<vec_length; i++) {
printf("i: %d vec[i]: %f\n",i,vec[i]);
}
}
// program execution begins here
int main( int argc, char ** argv )
{
// CUDA events allow us to measure execution time of our kernel
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// size_t is the appropriate type for bytes
size_t vec_bytes = SIZE * sizeof(float);
// host arrays
float * h_A = (float *)malloc( vec_bytes );
float * h_B = (float *)malloc( vec_bytes );
float * h_C = (float *)malloc( vec_bytes );
// fill array with random floats
fillOutVector( h_A, SIZE );
fillOutVector( h_B, SIZE );
// device arrays
float * d_A, * d_B, * d_C;
hipError_t rc; // return code from cuda functions
rc = hipMalloc(&d_A, vec_bytes);
if ( rc ) printf("%s\n",hipGetErrorString(rc));
hipMalloc(&d_B, vec_bytes);
hipMalloc(&d_C, vec_bytes);
// copy A and B to the device
hipMemcpy(d_A, h_A, vec_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, vec_bytes, hipMemcpyHostToDevice);
// dim3 is a 3-element struct with elements x, y, z (all ints)
dim3 threadsPerBlock(SIZE); // SIZE x 1 x 1
dim3 blocksPerGrid(1); // 1 x 1 x 1
// launch vector addition kernel!
hipEventRecord(start);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, SIZE);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel time (ms) : %7.5f\n",milliseconds);
// copy results to host
hipMemcpy(h_C, d_C, vec_bytes, hipMemcpyDeviceToHost);
//printVector( h_C, SIZE );
// verify that we got correct results
float * gold_C = (float *)malloc( vec_bytes );
hipEventRecord(start);
vecAddCPU( h_A, h_B, gold_C, SIZE );
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("cpu function time (ms) : %7.5f\n",milliseconds);
compareVecs( gold_C, h_C, SIZE );
// free memory on device
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// clean up timer variables
hipEventDestroy(start);
hipEventDestroy(stop);
// free memory on host
free(h_A);
free(h_B);
free(h_C);
free(gold_C);
return 0;
} | 2553b461e51f135d688bbd93440d35d7723ce6c5.cu | /* Vector addition on the GPU: C = A + B */
#include <stdio.h>
#include <stdlib.h>
// Max number of threads per block
#define SIZE 1024
// Device function (i.e. kernel)
__global__ void VecAdd(float * A, float * B, float * C, int N)
{
int i = threadIdx.x;
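	// Indexing uses only threadIdx.x, so this kernel assumes a single block with at
	// least N threads (main() launches one block of SIZE threads).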
if ( i < N ) {
C[i] = A[i] + B[i];
}
}
// CPU version of the vector addition function
void vecAddCPU(float * A, float * B, float * C, int N)
{
int i;
for (i=0; i<N; i++)
{
C[i] = A[i] + B[i];
}
}
// Function compares two 1d arrays
void compareVecs( float * vec1, float * vec2, int N )
{
int i;
int vecsEqual = 1;
for (i=0; i<N; i++)
{
    if ( fabs (vec1[i] - vec2[i]) > 0.00001 )
{
printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]);
vecsEqual = 0;
}
}
if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n");
}
/* Host function for filling vector (1d array) with
random numbers between -20.0 and 20.0 */
void fillOutVector( float * vec, int vec_length )
{
time_t t;
srand((unsigned) time(&t)); // initialize random number generator
int i;
for (i=0; i<vec_length; i++)
{
vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0;
vec[i] -= 20.0;
}
}
// Host function for printing a vector (1d array)
void printVector( float * vec, int vec_length )
{
int i;
for (i=0; i<vec_length; i++) {
printf("i: %d vec[i]: %f\n",i,vec[i]);
}
}
// program execution begins here
int main( int argc, char ** argv )
{
// CUDA events allow us to measure execution time of our kernel
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// size_t is the appropriate type for bytes
size_t vec_bytes = SIZE * sizeof(float);
// host arrays
float * h_A = (float *)malloc( vec_bytes );
float * h_B = (float *)malloc( vec_bytes );
float * h_C = (float *)malloc( vec_bytes );
// fill array with random floats
fillOutVector( h_A, SIZE );
fillOutVector( h_B, SIZE );
// device arrays
float * d_A, * d_B, * d_C;
cudaError_t rc; // return code from cuda functions
rc = cudaMalloc(&d_A, vec_bytes);
if ( rc ) printf("%s\n",cudaGetErrorString(rc));
cudaMalloc(&d_B, vec_bytes);
cudaMalloc(&d_C, vec_bytes);
// copy A and B to the device
cudaMemcpy(d_A, h_A, vec_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, vec_bytes, cudaMemcpyHostToDevice);
// dim3 is a 3-element struct with elements x, y, z (all ints)
dim3 threadsPerBlock(SIZE); // SIZE x 1 x 1
dim3 blocksPerGrid(1); // 1 x 1 x 1
// launch vector addition kernel!
cudaEventRecord(start);
VecAdd<<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, SIZE);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time (ms) : %7.5f\n",milliseconds);
// copy results to host
cudaMemcpy(h_C, d_C, vec_bytes, cudaMemcpyDeviceToHost);
//printVector( h_C, SIZE );
// verify that we got correct results
float * gold_C = (float *)malloc( vec_bytes );
cudaEventRecord(start);
vecAddCPU( h_A, h_B, gold_C, SIZE );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("cpu function time (ms) : %7.5f\n",milliseconds);
compareVecs( gold_C, h_C, SIZE );
// free memory on device
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// clean up timer variables
cudaEventDestroy(start);
cudaEventDestroy(stop);
// free memory on host
free(h_A);
free(h_B);
free(h_C);
free(gold_C);
return 0;
} |
373706b7cdf4264d8bf21a1cc339dbee3f21cd5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
// Loads a matrix available in the example directories
bool load_matrix(char * filename, float * &matrix, int &nx, int &ny){
std::string line;
std::ifstream infile(filename);
if (!infile.is_open()) {
std::cout << "Fichier introuvable: "<< filename << std::endl;
return 0;
}
// Load the size of the matrix
infile >> nx >> ny;
// Allocate the corresponding array
matrix = new float[nx*ny];
// Load the matrix
for (int i=0; i< nx*ny; i++){
infile >> matrix[i];
}
infile.close();
return 1;
}
// Compute C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
/// Insert the code
int y = blockIdx.x * blockDim.x + threadIdx.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
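	// Each thread computes one element C[y][x] as the dot product of row y of A and
	// column x of B (note that the row index y comes from the x grid dimension here).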
if (x < numCColumns && y < numCRows) {
int i = y * numCColumns + x;
float s = 0;
for (int k = 0; k < numAColumns; k++) {
s += A[y * numAColumns + k] * B[k * numBColumns + x];
}
C[i] = s;
}
}
int main(int argc, char** argv) {
float* hostA;
float* hostB;
float* hostC;
float* hostExpectedOutput;
float* deviceA;
float* deviceB;
float* deviceC;
int numARows;
int numAColumns;
int numBRows;
int numBColumns;
int numCRows;
int numCColumns;
int numORows;
int numOColumns;
/// Load the input file
load_matrix(argv[1], hostA, numARows, numAColumns);
load_matrix(argv[2], hostB, numBRows, numBColumns);
if (numAColumns != numBRows){
	std::cerr << "Loaded matrices are not compatible: their dimensions are: "
	<< "(" << numARows << ", " << numAColumns << ") and (" << numBRows << ", " << numBColumns << ")" << std::endl;
}
/// Initialize numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
/// Allocate hostC
hostC = new float[numCRows * numCColumns];
/// Print information about the matrix
std::cout << "(" << numARows << ", " << numAColumns << ") x (" << numBRows << ", "
<< numBColumns << ") = ("
<< numCRows << ", " << numCColumns << ")" << std::endl;
/// Allocate memory on the GPU
hipMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns);
hipMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns);
hipMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns);
/// Copy the memory to the GPU
hipMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, hipMemcpyHostToDevice);
/// Initialize the grid and the dimensions of each block
int block_size = 32;
dim3 block(block_size, block_size);
dim3 dim((int)(ceil((float)(numCRows) / block_size)),
(int)(ceil((float)(numCColumns) / block_size)));
std::cout << "Block size: (" << block.x << ", " << block.y << ", " << block.z << ")" << std::endl;
std::cout << "Grid size: (" << dim.x << ", " << dim.y << ", " << dim.z << ")" << std::endl;
/// Execute the kernel
hipLaunchKernelGGL(( matrixMultiply), dim3(dim), dim3(block), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns,
numCRows, numCColumns);
hipDeviceSynchronize();
/// Load the result into CPU memory
hipMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
load_matrix(argv[3], hostExpectedOutput, numORows, numOColumns);
if (numOColumns != numCColumns
|| numORows != numCRows) {
std::cerr << "Output matrix have wrong dimensions" << std::endl;
std::cerr << "(" << numORows << ", " << numOColumns << ") != ("
<< numCRows << ", " << numCColumns << ")" << std::endl;
}
float error = 0;
for (int i = 0; i < numCColumns * numCRows; i++) {
error += (hostExpectedOutput[i] - hostC[i]) * (hostExpectedOutput[i] - hostC[i]);
}
error /= (float)(numCColumns * numCRows);
/// Free the memory
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
	delete[] hostExpectedOutput;
	delete[] hostA;
	delete[] hostB;
	delete[] hostC;
return 0;
}
| 373706b7cdf4264d8bf21a1cc339dbee3f21cd5b.cu | #include <fstream>
#include <iostream>
#include <string>
#include <cuda_runtime.h>
// Loads a matrix available in the example directories
bool load_matrix(char * filename, float * &matrix, int &nx, int &ny){
std::string line;
std::ifstream infile(filename);
if (!infile.is_open()) {
std::cout << "Fichier introuvable: "<< filename << std::endl;
return 0;
}
// Load the size of the matrix
infile >> nx >> ny;
// Allocate the corresponding array
matrix = new float[nx*ny];
// Load the matrix
for (int i=0; i< nx*ny; i++){
infile >> matrix[i];
}
infile.close();
return 1;
}
// Compute C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
/// Insert the code
int y = blockIdx.x * blockDim.x + threadIdx.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
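	// Each thread computes one element C[y][x] as the dot product of row y of A and
	// column x of B (note that the row index y comes from the x grid dimension here).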
if (x < numCColumns && y < numCRows) {
int i = y * numCColumns + x;
float s = 0;
for (int k = 0; k < numAColumns; k++) {
s += A[y * numAColumns + k] * B[k * numBColumns + x];
}
C[i] = s;
}
}
int main(int argc, char** argv) {
float* hostA;
float* hostB;
float* hostC;
float* hostExpectedOutput;
float* deviceA;
float* deviceB;
float* deviceC;
int numARows;
int numAColumns;
int numBRows;
int numBColumns;
int numCRows;
int numCColumns;
int numORows;
int numOColumns;
/// Load the input file
load_matrix(argv[1], hostA, numARows, numAColumns);
load_matrix(argv[2], hostB, numBRows, numBColumns);
if (numAColumns != numBRows){
	std::cerr << "Loaded matrices are not compatible: their dimensions are: "
	<< "(" << numARows << ", " << numAColumns << ") and (" << numBRows << ", " << numBColumns << ")" << std::endl;
}
/// Initialize numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
/// Allocate hostC
hostC = new float[numCRows * numCColumns];
/// Print information about the matrix
std::cout << "(" << numARows << ", " << numAColumns << ") x (" << numBRows << ", "
<< numBColumns << ") = ("
<< numCRows << ", " << numCColumns << ")" << std::endl;
/// Allocate memory on the GPU
cudaMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns);
cudaMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns);
cudaMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns);
/// Copy the memory to the GPU
cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice);
/// Initialize the grid and the dimensions of each block
int block_size = 32;
dim3 block(block_size, block_size);
dim3 dim((int)(ceil((float)(numCRows) / block_size)),
(int)(ceil((float)(numCColumns) / block_size)));
std::cout << "Block size: (" << block.x << ", " << block.y << ", " << block.z << ")" << std::endl;
std::cout << "Grid size: (" << dim.x << ", " << dim.y << ", " << dim.z << ")" << std::endl;
/// Execute the kernel
matrixMultiply<<<dim, block>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns,
numCRows, numCColumns);
cudaDeviceSynchronize();
/// Load the result into CPU memory
cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
load_matrix(argv[3], hostExpectedOutput, numORows, numOColumns);
if (numOColumns != numCColumns
|| numORows != numCRows) {
std::cerr << "Output matrix have wrong dimensions" << std::endl;
std::cerr << "(" << numORows << ", " << numOColumns << ") != ("
<< numCRows << ", " << numCColumns << ")" << std::endl;
}
float error = 0;
for (int i = 0; i < numCColumns * numCRows; i++) {
error += (hostExpectedOutput[i] - hostC[i]) * (hostExpectedOutput[i] - hostC[i]);
}
error /= (float)(numCColumns * numCRows);
/// Free the memory
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
	delete[] hostExpectedOutput;
	delete[] hostA;
	delete[] hostB;
	delete[] hostC;
return 0;
}
|
0eeb90c6e4781a884213079195fb8ebb97fedc89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void sub_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] - b[i];
}
} | 0eeb90c6e4781a884213079195fb8ebb97fedc89.cu | #include "includes.h"
extern "C"
__global__ void sub_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] - b[i];
}
} |
51ee1769d4780183bdd46ad5113535ee110736da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
namespace fastgm {
namespace inference_result_kernel {
const int BLOCK_SIZE = 512;
/**
* Normalize each row of a matrix so that every row sums to 1
*/
__global__ void normalize_rows(device_matrix<float> m) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m.rows()) {
float sum = 0;
for (int j = 0; j < m.cols(); j++)
sum += m(i, j);
for (int j = 0; j < m.cols(); j++)
m(i, j) /= sum;
}
}
}
}
| 51ee1769d4780183bdd46ad5113535ee110736da.cu | namespace fastgm {
namespace inference_result_kernel {
const int BLOCK_SIZE = 512;
/**
* Normalize each row of a matrix so that every row sums to 1
*/
__global__ void normalize_rows(device_matrix<float> m) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m.rows()) {
float sum = 0;
for (int j = 0; j < m.cols(); j++)
sum += m(i, j);
for (int j = 0; j < m.cols(); j++)
m(i, j) /= sum;
}
}
}
}
|
94ca9aa194d552c9655afbf248bd84b5b7884a28.hip | // !!! This is a file automatically generated by hipify!!!
//
// Author: Felice Pantaleo, CERN
//
#include "RiemannFitOnGPU.h"
#include "RecoPixelVertexing/PixelTrackFitting/interface/RiemannFit.h"
#include <cstdint>
#include <hip/hip_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "RecoLocalTracker/SiPixelRecHits/plugins/siPixelRecHitsHeterogeneousProduct.h"
using HitsOnCPU = siPixelRecHitsHeterogeneousProduct::HitsOnCPU;
using HitsOnGPU = siPixelRecHitsHeterogeneousProduct::HitsOnGPU;
using TuplesOnGPU = pixelTuplesHeterogeneousProduct::TuplesOnGPU;
using namespace Eigen;
__global__
void kernelFastFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
HitsOnGPU const * __restrict__ hhp,
int hits_in_fit,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit,
uint32_t offset)
{
assert(hits_in_fit==4); // FixMe later template
assert(pfast_fit); assert(foundNtuplets);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
// Prepare data structure
auto const * hitId = foundNtuplets->begin(helix_start);
for (unsigned int i = 0; i < hits_in_fit; ++i) {
auto hit = hitId[i];
// printf("Hit global: %f,%f,%f\n", hhp->xg_d[hit],hhp->yg_d[hit],hhp->zg_d[hit]);
float ge[6];
hhp->cpeParams->detParams(hhp->detInd_d[hit]).frame.toGlobal(hhp->xerr_d[hit], 0, hhp->yerr_d[hit], ge);
// printf("Error: %d: %f,%f,%f,%f,%f,%f\n",hhp->detInd_d[hit],ge[0],ge[1],ge[2],ge[3],ge[4],ge[5]);
hits.col(i) << hhp->xg_d[hit], hhp->yg_d[hit], hhp->zg_d[hit];
hits_ge.col(i) << ge[0],ge[1],ge[2],ge[3],ge[4],ge[5];
}
Rfit::Fast_fit(hits,fast_fit);
// no NaN here....
assert(fast_fit(0)==fast_fit(0));
assert(fast_fit(1)==fast_fit(1));
assert(fast_fit(2)==fast_fit(2));
assert(fast_fit(3)==fast_fit(3));
}
__global__
void kernelCircleFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
int hits_in_fit,
double B,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit_input,
Rfit::circle_fit *circle_fit,
uint32_t offset)
{
assert(circle_fit);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit_input+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
assert(4==n); // later will be templated...
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = Rfit::Matrix2Nd<4>::Zero();
Rfit::loadCovariance2D(hits_ge,hits_cov);
circle_fit[local_start] =
Rfit::Circle_fit(hits.block(0, 0, 2, n),
hits_cov,
fast_fit, rad, B, true);
#ifdef GPU_DEBUG
// printf("kernelCircleFitAllHits circle.par(0,1,2): %d %f,%f,%f\n", helix_start,
// circle_fit[local_start].par(0), circle_fit[local_start].par(1), circle_fit[local_start].par(2));
#endif
}
__global__
void kernelLineFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
int hits_in_fit,
double B,
Rfit::helix_fit *results,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit,
Rfit::circle_fit * __restrict__ circle_fit,
uint32_t offset)
{
assert(results); assert(circle_fit);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
auto const & line_fit = Rfit::Line_fit(hits, hits_ge, circle_fit[local_start], fast_fit, B, true);
par_uvrtopak(circle_fit[local_start], B, true);
// Grab helix_fit from the proper location in the output vector
auto & helix = results[helix_start];
helix.par << circle_fit[local_start].par, line_fit.par;
// TODO: pass properly error booleans
helix.cov = Rfit::Matrix5d::Zero();
helix.cov.block(0, 0, 3, 3) = circle_fit[local_start].cov;
helix.cov.block(3, 3, 2, 2) = line_fit.cov;
helix.q = circle_fit[local_start].q;
helix.chi2_circle = circle_fit[local_start].chi2;
helix.chi2_line = line_fit.chi2;
#ifdef GPU_DEBUG
printf("kernelLineFitAllHits circle.par(0,1,2): %d %f,%f,%f\n", helix_start,
circle_fit[local_start].par(0), circle_fit[local_start].par(1), circle_fit[local_start].par(2));
printf("kernelLineFitAllHits line.par(0,1): %d %f,%f\n", helix_start, line_fit.par(0),line_fit.par(1));
printf("kernelLineFitAllHits chi2 cov %f/%f %f,%f,%f,%f,%f\n",helix.chi2_circle,helix.chi2_line,
helix.cov(0,0),helix.cov(1,1),helix.cov(2,2),helix.cov(3,3),helix.cov(4,4));
#endif
}
void RiemannFitOnGPU::launchKernels(HitsOnCPU const & hh, uint32_t nhits, uint32_t maxNumberOfTuples, hipStream_t cudaStream)
{
assert(tuples_d); assert(fast_fit_resultsGPU_);
auto blockSize = 128;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
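	// Tuples are processed in chunks of maxNumberOfConcurrentFits_: the fast fit seeds
	// the circle fit, and the circle-fit result is then combined with the line fit into
	// the final helix parameters.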
for (uint32_t offset=0; offset<maxNumberOfTuples; offset+=maxNumberOfConcurrentFits_) {
hipLaunchKernelGGL(( kernelFastFitAllHits), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, hh.gpu_d, 4,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_,offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelCircleFitAllHits), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, 4, bField_,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_, circle_fit_resultsGPU_, offset);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernelLineFitAllHits), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, 4, bField_, helix_fit_results_d,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_, circle_fit_resultsGPU_,
offset);
cudaCheck(hipGetLastError());
}
}
| 94ca9aa194d552c9655afbf248bd84b5b7884a28.cu | //
// Author: Felice Pantaleo, CERN
//
#include "RiemannFitOnGPU.h"
#include "RecoPixelVertexing/PixelTrackFitting/interface/RiemannFit.h"
#include <cstdint>
#include <cuda_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "RecoLocalTracker/SiPixelRecHits/plugins/siPixelRecHitsHeterogeneousProduct.h"
using HitsOnCPU = siPixelRecHitsHeterogeneousProduct::HitsOnCPU;
using HitsOnGPU = siPixelRecHitsHeterogeneousProduct::HitsOnGPU;
using TuplesOnGPU = pixelTuplesHeterogeneousProduct::TuplesOnGPU;
using namespace Eigen;
__global__
void kernelFastFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
HitsOnGPU const * __restrict__ hhp,
int hits_in_fit,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit,
uint32_t offset)
{
assert(hits_in_fit==4); // FixMe later template
assert(pfast_fit); assert(foundNtuplets);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
// Prepare data structure
auto const * hitId = foundNtuplets->begin(helix_start);
for (unsigned int i = 0; i < hits_in_fit; ++i) {
auto hit = hitId[i];
// printf("Hit global: %f,%f,%f\n", hhp->xg_d[hit],hhp->yg_d[hit],hhp->zg_d[hit]);
float ge[6];
hhp->cpeParams->detParams(hhp->detInd_d[hit]).frame.toGlobal(hhp->xerr_d[hit], 0, hhp->yerr_d[hit], ge);
// printf("Error: %d: %f,%f,%f,%f,%f,%f\n",hhp->detInd_d[hit],ge[0],ge[1],ge[2],ge[3],ge[4],ge[5]);
hits.col(i) << hhp->xg_d[hit], hhp->yg_d[hit], hhp->zg_d[hit];
hits_ge.col(i) << ge[0],ge[1],ge[2],ge[3],ge[4],ge[5];
}
Rfit::Fast_fit(hits,fast_fit);
// no NaN here....
assert(fast_fit(0)==fast_fit(0));
assert(fast_fit(1)==fast_fit(1));
assert(fast_fit(2)==fast_fit(2));
assert(fast_fit(3)==fast_fit(3));
}
__global__
void kernelCircleFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
int hits_in_fit,
double B,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit_input,
Rfit::circle_fit *circle_fit,
uint32_t offset)
{
assert(circle_fit);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit_input+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
constexpr uint32_t N = Rfit::Map3x4d::ColsAtCompileTime;
constexpr auto n = N;
assert(4==n); // later will be templated...
Rfit::VectorNd<N> rad = (hits.block(0, 0, 2, n).colwise().norm());
Rfit::Matrix2Nd<N> hits_cov = Rfit::Matrix2Nd<4>::Zero();
Rfit::loadCovariance2D(hits_ge,hits_cov);
circle_fit[local_start] =
Rfit::Circle_fit(hits.block(0, 0, 2, n),
hits_cov,
fast_fit, rad, B, true);
#ifdef GPU_DEBUG
// printf("kernelCircleFitAllHits circle.par(0,1,2): %d %f,%f,%f\n", helix_start,
// circle_fit[local_start].par(0), circle_fit[local_start].par(1), circle_fit[local_start].par(2));
#endif
}
__global__
void kernelLineFitAllHits(TuplesOnGPU::Container const * __restrict__ foundNtuplets,
int hits_in_fit,
double B,
Rfit::helix_fit *results,
double * __restrict__ phits,
float * __restrict__ phits_ge,
double * __restrict__ pfast_fit,
Rfit::circle_fit * __restrict__ circle_fit,
uint32_t offset)
{
assert(results); assert(circle_fit);
auto local_start = (blockIdx.x * blockDim.x + threadIdx.x);
auto helix_start = local_start + offset;
if (helix_start>=foundNtuplets->nbins()) return;
if (foundNtuplets->size(helix_start)<hits_in_fit) {
return;
}
Rfit::Map3x4d hits(phits+local_start);
Rfit::Map4d fast_fit(pfast_fit+local_start);
Rfit::Map6x4f hits_ge(phits_ge+local_start);
auto const & line_fit = Rfit::Line_fit(hits, hits_ge, circle_fit[local_start], fast_fit, B, true);
par_uvrtopak(circle_fit[local_start], B, true);
// Grab helix_fit from the proper location in the output vector
auto & helix = results[helix_start];
helix.par << circle_fit[local_start].par, line_fit.par;
// TODO: pass properly error booleans
helix.cov = Rfit::Matrix5d::Zero();
helix.cov.block(0, 0, 3, 3) = circle_fit[local_start].cov;
helix.cov.block(3, 3, 2, 2) = line_fit.cov;
helix.q = circle_fit[local_start].q;
helix.chi2_circle = circle_fit[local_start].chi2;
helix.chi2_line = line_fit.chi2;
#ifdef GPU_DEBUG
printf("kernelLineFitAllHits circle.par(0,1,2): %d %f,%f,%f\n", helix_start,
circle_fit[local_start].par(0), circle_fit[local_start].par(1), circle_fit[local_start].par(2));
printf("kernelLineFitAllHits line.par(0,1): %d %f,%f\n", helix_start, line_fit.par(0),line_fit.par(1));
printf("kernelLineFitAllHits chi2 cov %f/%f %f,%f,%f,%f,%f\n",helix.chi2_circle,helix.chi2_line,
helix.cov(0,0),helix.cov(1,1),helix.cov(2,2),helix.cov(3,3),helix.cov(4,4));
#endif
}
void RiemannFitOnGPU::launchKernels(HitsOnCPU const & hh, uint32_t nhits, uint32_t maxNumberOfTuples, cudaStream_t cudaStream)
{
assert(tuples_d); assert(fast_fit_resultsGPU_);
auto blockSize = 128;
auto numberOfBlocks = (maxNumberOfConcurrentFits_ + blockSize - 1) / blockSize;
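	// Tuples are processed in chunks of maxNumberOfConcurrentFits_: the fast fit seeds
	// the circle fit, and the circle-fit result is then combined with the line fit into
	// the final helix parameters.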
for (uint32_t offset=0; offset<maxNumberOfTuples; offset+=maxNumberOfConcurrentFits_) {
kernelFastFitAllHits<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, hh.gpu_d, 4,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_,offset);
cudaCheck(cudaGetLastError());
kernelCircleFitAllHits<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, 4, bField_,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_, circle_fit_resultsGPU_, offset);
cudaCheck(cudaGetLastError());
kernelLineFitAllHits<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, 4, bField_, helix_fit_results_d,
hitsGPU_, hits_geGPU_, fast_fit_resultsGPU_, circle_fit_resultsGPU_,
offset);
cudaCheck(cudaGetLastError());
}
}
|
330fb0862c016cea08c350670d4ce0b1f173493d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 29-Oct-2012 09:37:53
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
double *ind_arg1,
double *ind_arg2,
double *ind_arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4];
ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4];
ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4];
ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_double;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg1_s+arg_map[2*set_size+n+offset_b]*4,
ind_arg1_s+arg_map[3*set_size+n+offset_b]*4,
ind_arg2_s+arg_map[4*set_size+n+offset_b]*1,
ind_arg2_s+arg_map[5*set_size+n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map;
int arg7_map;
if (col2>=0) {
arg6_map = arg_map[6*set_size+n+offset_b];
arg7_map = arg_map[7*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args);
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(2);
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
| 330fb0862c016cea08c350670d4ce0b1f173493d.cu | //
// auto-generated by op2.m on 29-Oct-2012 09:37:53
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
double *ind_arg1,
double *ind_arg2,
double *ind_arg3,
int *ind_map,
short *arg_map,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg6_l[4];
double arg7_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ double *ind_arg0_s;
__shared__ double *ind_arg1_s;
__shared__ double *ind_arg2_s;
__shared__ double *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4];
ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4];
ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4];
ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2);
ind_arg1_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4);
ind_arg2_s = (double *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1);
ind_arg3_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg6_l[d] = ZERO_double;
for (int d=0; d<4; d++)
arg7_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2,
ind_arg0_s+arg_map[1*set_size+n+offset_b]*2,
ind_arg1_s+arg_map[2*set_size+n+offset_b]*4,
ind_arg1_s+arg_map[3*set_size+n+offset_b]*4,
ind_arg2_s+arg_map[4*set_size+n+offset_b]*1,
ind_arg2_s+arg_map[5*set_size+n+offset_b]*1,
arg6_l,
arg7_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg6_map;
int arg7_map;
if (col2>=0) {
arg6_map = arg_map[6*set_size+n+offset_b];
arg7_map = arg_map[7*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg6_map*4] += arg6_l[d];
for (int d=0; d<4; d++)
ind_arg3_s[d+arg7_map*4] += arg7_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7 ){
int nargs = 8;
op_arg args[8];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
int ninds = 4;
int inds[8] = {0,0,1,1,2,2,3,3};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_2
int part_size = OP_PART_SIZE_2;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(2);
OP_kernels[2].name = name;
OP_kernels[2].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args);
#ifdef OP_BLOCK_SIZE_2
int nthread = OP_BLOCK_SIZE_2;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = Plan->nsharedCol[col];
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d,
(double *)arg2.data_d,
(double *)arg4.data_d,
(double *)arg6.data_d,
Plan->ind_map,
Plan->loc_map,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(2);
OP_kernels[2].transfer += Plan->transfer;
OP_kernels[2].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[2].time += wall_t2 - wall_t1;
}
|
51497253cedfd6c35a9855404f03823173ba1819.hip | // !!! This is a file automatically generated by hipify!!!
/*
* The code has been written by Karan Bhanot, Abolaji Adesoji, Aditya Joshi and Dhyanjyoti Nath.
*
* Some function definitions are referenced from
* sample code provided by Christopher D. Carothers,
* provided as part of his class assignment of Parallel Computing
* Spring 2020.
*/
// Include headers (including CUDA)
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>
// Buffer
extern long long *buf;
/*
 * Returns the initialized buffer on CUDA
*/
extern "C" void getBuffer( int rank, int numranks, long long blocksize )
{
// Check and assign the device for this MPI rank
hipError_t cE;
int cudaDeviceCount;
// Check if enough devices are available
if ((cE = hipGetDeviceCount(&cudaDeviceCount)) != hipSuccess) {
printf("Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount);
exit(-1);
}
// Set device given that it is available
if ((cE = hipSetDevice(rank % cudaDeviceCount)) != hipSuccess) {
printf(" Unable to have rank %d set to cuda device %d, error is %d \n", rank, (rank % cudaDeviceCount), cE);
exit(-1);
}
// Assign memory to the buf variable
hipMallocManaged(&buf, blocksize);
}
| 51497253cedfd6c35a9855404f03823173ba1819.cu | /*
* The code has been written by Karan Bhanot, Abolaji Adesoji, Aditya Joshi and Dhyanjyoti Nath.
*
* Some function definitions are referenced from
* sample code provided by Christopher D. Carothers,
* provided as part of his class assignment of Parallel Computing
* Spring 2020.
*/
// Include headers (including CUDA)
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<cuda.h>
#include<cuda_runtime.h>
// Buffer
extern long long *buf;
/*
 * Returns the initialized buffer on CUDA
*/
extern "C" void getBuffer( int rank, int numranks, long long blocksize )
{
// Check and assign the device for this MPI rank
cudaError_t cE;
int cudaDeviceCount;
// Check if enough devices are available
if ((cE = cudaGetDeviceCount(&cudaDeviceCount)) != cudaSuccess) {
printf("Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount);
exit(-1);
}
// Set device given that it is available
if ((cE = cudaSetDevice(rank % cudaDeviceCount)) != cudaSuccess) {
printf(" Unable to have rank %d set to cuda device %d, error is %d \n", rank, (rank % cudaDeviceCount), cE);
exit(-1);
}
// Assign memory to the buf variable
cudaMallocManaged(&buf, blocksize);
}
|
960fc296723fe2a612a620daf037a5bb3f6048dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void holaCUDA(float e) {
printf("Hola, soy el hilo %i del bloque %i con valor pi -> %f \n",threadIdx.x,blockIdx.x,e);
}
int main(int argc, char **argv){
hipLaunchKernelGGL(( holaCUDA), dim3(3),dim3(4), 0, 0, 3.1416);
hipDeviceReset();
return 0;
} | 960fc296723fe2a612a620daf037a5bb3f6048dc.cu | #include <stdio.h>
__global__ void holaCUDA(float e) {
printf("Hola, soy el hilo %i del bloque %i con valor pi -> %f \n",threadIdx.x,blockIdx.x,e);
}
int main(int argc, char **argv){
holaCUDA<<<3,4>>>(3.1416);
cudaDeviceReset();
return 0;
} |
391cfa069fce907c3a97f122fb49548a2cb2e1c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <math_constants.h>
extern "C"
{
__global__ void
rtruncnorm_kernel(
float *x, int n,
float *mu, float *sigma,
float *lo, float *hi,
int maxtries, int rng_a,
int rng_b, int rng_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < n){
// Setup the RNG:
hiprandState_t rng;
hiprand_init(rng_a+rng_b*idx,rng_c,0,&rng);
// Draw sample
int ntries = 0;
int accepted = 0;
float ran;
while(!accepted and ntries < maxtries){
ran = mu[idx]+sigma[idx]*hiprand_normal(&rng);
ntries += 1;
if(ran >= lo[idx] and ran <= hi[idx]){
accepted = 1;
}
}
// Use Robert Method if that didn't work
if(!accepted){
// my code is set up to sample only (a,infty), so if it's a (-infty,b) sample we want, then we sample from (-b,infty) and reverse the sign after
int rev_sign = 0;
float lower;
if(isfinite(hi[idx])){
lower = lo[idx]-mu[idx];
}else{
lower = mu[idx]-hi[idx];
rev_sign = 1;
}
float alpha = (lower+sqrtf(lower*lower+4))/2;
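				// alpha is the rate of the shifted-exponential proposal used by Robert's
				// accept-reject method for one-sided truncated normals;
				// (lower + sqrt(lower^2 + 4)) / 2 maximizes the expected acceptance probability.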
float z;
int ntries = 0;
// I may well have done something wrong...but for some datasets, this while loop never ended if I didn't set a max # of tries.
while(!accepted and ntries < 10000L){
ntries += 1;
float psi;
// sample uniform, then use inverse cdf to get sample from exponential distribution:
z = lower-logf(hiprand_uniform(&rng))/alpha;
if(lower<alpha){
psi = expf(-powf((alpha-z),2)/2);
}else{
psi = expf(-powf((alpha-z),2)/2)*expf(powf((lower-alpha),2)/2);
}
float u = hiprand_uniform(&rng);
if(u<psi){
accepted = 1;
}
}
if(rev_sign){
ran = mu[idx]-z;
}else{
ran = mu[idx]+z;
}
// If the Robert method failed to accept in 10000 tries:
if(!accepted){
ran = CUDART_NAN_F;
}
}
x[idx] = ran;
}
return;
}
} // END extern "C"
| 391cfa069fce907c3a97f122fb49548a2cb2e1c0.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
__global__ void
rtruncnorm_kernel(
float *x, int n,
float *mu, float *sigma,
float *lo, float *hi,
int maxtries, int rng_a,
int rng_b, int rng_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < n){
// Setup the RNG:
curandState_t rng;
curand_init(rng_a+rng_b*idx,rng_c,0,&rng);
// Draw sample
int ntries = 0;
int accepted = 0;
float ran;
while(!accepted and ntries < maxtries){
ran = mu[idx]+sigma[idx]*curand_normal(&rng);
ntries += 1;
if(ran >= lo[idx] and ran <= hi[idx]){
accepted = 1;
}
}
// Use Robert Method if that didn't work
if(!accepted){
// my code is set up to sample only (a,infty), so if it's a (-infty,b) sample we want, then we sample from (-b,infty) and reverse the sign after
int rev_sign = 0;
float lower;
if(isfinite(hi[idx])){
lower = lo[idx]-mu[idx];
}else{
lower = mu[idx]-hi[idx];
rev_sign = 1;
}
float alpha = (lower+sqrtf(lower*lower+4))/2;
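				// alpha is the rate of the shifted-exponential proposal used by Robert's
				// accept-reject method for one-sided truncated normals;
				// (lower + sqrt(lower^2 + 4)) / 2 maximizes the expected acceptance probability.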
float z;
int ntries = 0;
// I may well have done something wrong...but for some datasets, this while loop never ended if I didn't set a max # of tries.
while(!accepted and ntries < 10000L){
ntries += 1;
float psi;
// sample uniform, then use inverse cdf to get sample from exponential distribution:
z = lower-logf(curand_uniform(&rng))/alpha;
if(lower<alpha){
psi = expf(-powf((alpha-z),2)/2);
}else{
psi = expf(-powf((alpha-z),2)/2)*expf(powf((lower-alpha),2)/2);
}
float u = curand_uniform(&rng);
if(u<psi){
accepted = 1;
}
}
if(rev_sign){
ran = mu[idx]-sigma[idx]*z;
}else{
ran = mu[idx]+sigma[idx]*z;
}
// If the Robert method failed to accept in 10000 tries:
if(!accepted){
ran = CUDART_NAN_F;
}
}
x[idx] = ran;
}
return;
}
} // END extern "C"
|
e98224e0582fb3da4a3ce82ca7ba30f31902cb9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolutionY_63_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
hipMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
hipMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int imageD = 1;
int outofbounds = 1;
float outofboundsvalue = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((convolutionY_63_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue);
hipDeviceSynchronize();
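// warm-up: 10 untimed launches so the 1000-launch loop below measures steady-state kernel time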
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((convolutionY_63_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((convolutionY_63_Kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_Dst, d_Src, imageW, imageH, imageD, outofbounds, outofboundsvalue);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e98224e0582fb3da4a3ce82ca7ba30f31902cb9a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolutionY_63_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
cudaMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
cudaMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int imageD = 1;
int outofbounds = 1;
float outofboundsvalue = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolutionY_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
cudaDeviceSynchronize();
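// warm-up: 10 untimed launches so the 1000-launch loop below measures steady-state kernel time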
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolutionY_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolutionY_63_Kernel<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,imageD,outofbounds,outofboundsvalue);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0e2c9b3fd23ca1739bf01f5d79fbdf81d745a3ea.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "defs.h"
typedef unsigned int uint;
typedef unsigned char uchar;
hipArray *d_volumeArray1 = 0;
hipArray *d_transferFuncArray1;
typedef unsigned char VolumeType;
texture<VolumeType, 3, hipReadModeNormalizedFloat> tex1; // 3D texture
texture<float4, 1, hipReadModeElementType> transferTex1; // 1D transfer function texture
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
__constant__ int dID;
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__device__ float4
get_pix_val( int src, int maxSteps, Ray eyeRay, float tstep, float tnear, float tfar,
float Offset, float Scale, float dens, float weight, float opacity )
{
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
float4 sum = make_float4(0.0f);
for(int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1] coordinates
float sample;
float4 col;
if (src == 1){
sample = tex3D(tex1, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
col = tex1D(transferTex1, (sample-Offset)*Scale);
}
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
col.w *= dens * weight;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
//if (sum.w > opacity)
// break;
t += tstep;
if (t > tfar) break;
pos += step;
}
return sum;
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float dens1, float bright1,
float Offset1, float Scale1, float weight1)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum1 = make_float4(0.0f);
sum1 += get_pix_val( 1, maxSteps, eyeRay, tstep, tnear, tfar, Offset1, Scale1, dens1, weight1, opacityThreshold );
sum1 *= bright1;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum1);
}
__constant__ float dmax, dmin;
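// linearly rescale float voxel values from [dmin, dmax] (constant memory) to unsigned char [0, 255]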
__global__ void deviceDub2Char( float *input, unsigned char *out ){
int pos = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
float value = 255 * (input[pos] - dmin) / abs(dmax - dmin);
out[pos] = (unsigned char) value;
}
extern "C" void cudaInitVdens( FLOAT_GRID *dens1,
CHAR_GRID *vdens1,
float data_max1 )
{
int Xc = vdens1->count.x;
int Yc = vdens1->count.y;
int Zc = vdens1->count.z;
size_t float_size = Xc*Yc*Zc*sizeof(float);
/*
size_t freeMem, totalMem;
checkCudaErrors(hipMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu (float_size = %lu)\n",freeMem,totalMem,float_size);
fflush(stdout);
*/
float *ddata;
checkCudaErrors( hipMalloc( (void **) &ddata, float_size) );
checkCudaErrors( hipMemset( ddata, 0.0, float_size ) );
dim3 block(Xc);
dim3 grid(Yc,Zc);
size_t data_size = Xc*Yc*Zc*sizeof(unsigned char);
//int max_size = max( max( Xc, Yc) , Zc );
/*
checkCudaErrors(hipMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu (data_size = %lu)\n",freeMem,totalMem,data_size);
fflush(stdout);
*/
unsigned char *d_charvol;
checkCudaErrors( hipMalloc( (void**) &d_charvol, data_size ) );
checkCudaErrors( hipMemset( d_charvol, 0.0, data_size ) );
checkCudaErrors( hipMemcpy( ddata, dens1->matrix, float_size, hipMemcpyHostToDevice ) );
checkCudaErrors( hipHostMalloc( (void**) &vdens1->matrix, data_size, hipHostMallocPortable ) );
checkCudaErrors( hipMemcpyToSymbol( dmax, &data_max1, sizeof(float) ) );
checkCudaErrors( hipMemcpyToSymbol( dmin, &dens1->min, sizeof(float) ) );
hipLaunchKernelGGL(( deviceDub2Char), dim3(grid),dim3(block), 0, 0, ddata,d_charvol);
hipDeviceSynchronize();
checkCudaErrors( hipMemcpy( vdens1->matrix, d_charvol, data_size, hipMemcpyDeviceToHost ) );
checkCudaErrors(hipFree(d_charvol));
checkCudaErrors(hipFree(ddata));
/*
checkCudaErrors(hipMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu \n",freeMem,totalMem);
fflush(stdout);
*/
}
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>();
hipMemcpy3DParms copyParams1 = {0};
extern "C"
void initCudaDens( void *h_volume1, hipExtent volumeSize, int colorScale )
{
// create 3D array
checkCudaErrors( hipMalloc3DArray(&d_volumeArray1, &channelDesc, volumeSize) );
// copy data to 3D array
copyParams1.srcPtr = make_hipPitchedPtr(h_volume1, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams1.dstArray = d_volumeArray1;
copyParams1.extent = volumeSize;
copyParams1.kind = hipMemcpyHostToDevice;
checkCudaErrors( hipMemcpy3D(©Params1) );
// set texture parameters
tex1.normalized = true; // access with normalized texture coordinates
tex1.filterMode = hipFilterModeLinear; // linear interpolation
tex1.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex1.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(hipBindTextureToArray(tex1, d_volumeArray1, channelDesc));
// create transfer function texture
float4 transferFunc3[] = {
{ 0.5, 0.0, 0.5, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.5, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.5, 1.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 1.0, 0.5, 1.0, },
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.5, 1.0, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 1.0, 0.825, 0.0, 1.0, },
{ 1.0, 0.65, 0.0, 1.0, },
{ 1.0, 0.325, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0 },
{ 1.0, 0.5, 0.5, 1.0 },
};
float4 transferFunc2[] = {
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.5, 0.0, 0.5, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.5, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.5, 1.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 1.0, 0.5, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.5, 1.0, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 1.0, 0.825, 0.0, 1.0, },
{ 1.0, 0.65, 0.0, 1.0, },
{ 1.0, 0.325, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0 },
{ 1.0, 0.5, 0.5, 1.0 },
{ 1.0, 1.0, 1.0, 1.0 }
};
float4 transferFunc1[] = {
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.2, 0.2, 0.2, 1.0, },
{ 0.4, 0.4, 0.4, 1.0, },
{ 0.6, 0.6, 0.6, 1.0, },
{ 0.8, 0.8, 0.8, 1.0, },
{ 1.0, 1.0, 1.0, 1.0, }
};
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>();
hipArray* d_transferFuncArray1;
if (colorScale <= 1)
{
checkCudaErrors(hipMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc1)/sizeof(float4), 1));
checkCudaErrors(hipMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc1, sizeof(transferFunc1), hipMemcpyHostToDevice));
}
else if (colorScale == 2)
{
checkCudaErrors(hipMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc2)/sizeof(float4), 1));
checkCudaErrors(hipMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc2, sizeof(transferFunc2), hipMemcpyHostToDevice));
}
else if (colorScale == 3)
{
checkCudaErrors(hipMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc3)/sizeof(float4), 1));
checkCudaErrors(hipMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc3, sizeof(transferFunc3), hipMemcpyHostToDevice));
}
transferTex1.filterMode = hipFilterModeLinear;
transferTex1.normalized = true; // access with normalized texture coordinates
transferTex1.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors( hipBindTextureToArray( transferTex1, d_transferFuncArray1, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(hipUnbindTexture(tex1));
checkCudaErrors(hipUnbindTexture(transferTex1));
hipFreeArray(d_transferFuncArray1);
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float dens1, float bright1, float Offset1, float Scale1, float weight1)
{
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, dens1, bright1,
Offset1, Scale1, weight1 );
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix, 0, hipMemcpyHostToDevice) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| 0e2c9b3fd23ca1739bf01f5d79fbdf81d745a3ea.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <cuda_runtime_api.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "defs.h"
typedef unsigned int uint;
typedef unsigned char uchar;
cudaArray *d_volumeArray1 = 0;
cudaArray *d_transferFuncArray1;
typedef unsigned char VolumeType;
texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex1; // 3D texture
texture<float4, 1, cudaReadModeElementType> transferTex1; // 1D transfer function texture
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
__constant__ int dID;
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__device__ float4
get_pix_val( int src, int maxSteps, Ray eyeRay, float tstep, float tnear, float tfar,
float Offset, float Scale, float dens, float weight, float opacity )
{
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
float4 sum = make_float4(0.0f);
for(int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1] coordinates
float sample;
float4 col;
if (src == 1){
sample = tex3D(tex1, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
col = tex1D(transferTex1, (sample-Offset)*Scale);
}
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
col.w *= dens * weight;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
//if (sum.w > opacity)
// break;
t += tstep;
if (t > tfar) break;
pos += step;
}
return sum;
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float dens1, float bright1,
float Offset1, float Scale1, float weight1)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum1 = make_float4(0.0f);
sum1 += get_pix_val( 1, maxSteps, eyeRay, tstep, tnear, tfar, Offset1, Scale1, dens1, weight1, opacityThreshold );
sum1 *= bright1;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum1);
}
__constant__ float dmax, dmin;
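// linearly rescale float voxel values from [dmin, dmax] (constant memory) to unsigned char [0, 255]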
__global__ void deviceDub2Char( float *input, unsigned char *out ){
int pos = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
float value = 255 * (input[pos] - dmin) / abs(dmax - dmin);
out[pos] = (unsigned char) value;
}
extern "C" void cudaInitVdens( FLOAT_GRID *dens1,
CHAR_GRID *vdens1,
float data_max1 )
{
int Xc = vdens1->count.x;
int Yc = vdens1->count.y;
int Zc = vdens1->count.z;
size_t float_size = Xc*Yc*Zc*sizeof(float);
/*
size_t freeMem, totalMem;
checkCudaErrors(cudaMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu (float_size = %lu)\n",freeMem,totalMem,float_size);
fflush(stdout);
*/
float *ddata;
checkCudaErrors( cudaMalloc( (void **) &ddata, float_size) );
checkCudaErrors( cudaMemset( ddata, 0.0, float_size ) );
dim3 block(Xc);
dim3 grid(Yc,Zc);
size_t data_size = Xc*Yc*Zc*sizeof(unsigned char);
//int max_size = max( max( Xc, Yc) , Zc );
/*
checkCudaErrors(cudaMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu (data_size = %lu)\n",freeMem,totalMem,data_size);
fflush(stdout);
*/
unsigned char *d_charvol;
checkCudaErrors( cudaMalloc( (void**) &d_charvol, data_size ) );
checkCudaErrors( cudaMemset( d_charvol, 0.0, data_size ) );
checkCudaErrors( cudaMemcpy( ddata, dens1->matrix, float_size, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaHostAlloc( (void**) &vdens1->matrix, data_size, cudaHostAllocPortable ) );
checkCudaErrors( cudaMemcpyToSymbol( dmax, &data_max1, sizeof(float) ) );
checkCudaErrors( cudaMemcpyToSymbol( dmin, &dens1->min, sizeof(float) ) );
deviceDub2Char<<<grid,block>>>(ddata,d_charvol);
cudaThreadSynchronize();
checkCudaErrors( cudaMemcpy( vdens1->matrix, d_charvol, data_size, cudaMemcpyDeviceToHost ) );
checkCudaErrors(cudaFree(d_charvol));
checkCudaErrors(cudaFree(ddata));
/*
checkCudaErrors(cudaMemGetInfo(&freeMem,&totalMem));
printf("\n Free Memory: %lu / %lu \n",freeMem,totalMem);
fflush(stdout);
*/
}
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>();
cudaMemcpy3DParms copyParams1 = {0};
extern "C"
void initCudaDens( void *h_volume1, cudaExtent volumeSize, int colorScale )
{
// create 3D array
checkCudaErrors( cudaMalloc3DArray(&d_volumeArray1, &channelDesc, volumeSize) );
// copy data to 3D array
copyParams1.srcPtr = make_cudaPitchedPtr(h_volume1, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams1.dstArray = d_volumeArray1;
copyParams1.extent = volumeSize;
copyParams1.kind = cudaMemcpyHostToDevice;
checkCudaErrors( cudaMemcpy3D(©Params1) );
// set texture parameters
tex1.normalized = true; // access with normalized texture coordinates
tex1.filterMode = cudaFilterModeLinear; // linear interpolation
tex1.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex1.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(cudaBindTextureToArray(tex1, d_volumeArray1, channelDesc));
// create transfer function texture
float4 transferFunc3[] = {
{ 0.5, 0.0, 0.5, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.5, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.5, 1.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 1.0, 0.5, 1.0, },
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.5, 1.0, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 1.0, 0.825, 0.0, 1.0, },
{ 1.0, 0.65, 0.0, 1.0, },
{ 1.0, 0.325, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0 },
{ 1.0, 0.5, 0.5, 1.0 },
};
float4 transferFunc2[] = {
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.5, 0.0, 0.5, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.5, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.5, 1.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 1.0, 0.5, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.5, 1.0, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 1.0, 0.825, 0.0, 1.0, },
{ 1.0, 0.65, 0.0, 1.0, },
{ 1.0, 0.325, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0 },
{ 1.0, 0.5, 0.5, 1.0 },
{ 1.0, 1.0, 1.0, 1.0 }
};
float4 transferFunc1[] = {
{ 0.0, 0.0, 0.0, 0.0 },
{ 0.2, 0.2, 0.2, 1.0, },
{ 0.4, 0.4, 0.4, 1.0, },
{ 0.6, 0.6, 0.6, 1.0, },
{ 0.8, 0.8, 0.8, 1.0, },
{ 1.0, 1.0, 1.0, 1.0, }
};
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>();
cudaArray* d_transferFuncArray1;
if (colorScale <= 1)
{
checkCudaErrors(cudaMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc1)/sizeof(float4), 1));
checkCudaErrors(cudaMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc1, sizeof(transferFunc1), cudaMemcpyHostToDevice));
}
else if (colorScale == 2)
{
checkCudaErrors(cudaMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc2)/sizeof(float4), 1));
checkCudaErrors(cudaMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc2, sizeof(transferFunc2), cudaMemcpyHostToDevice));
}
else if (colorScale == 3)
{
checkCudaErrors(cudaMallocArray( &d_transferFuncArray1, &channelDesc2, sizeof(transferFunc3)/sizeof(float4), 1));
checkCudaErrors(cudaMemcpyToArray( d_transferFuncArray1, 0, 0, transferFunc3, sizeof(transferFunc3), cudaMemcpyHostToDevice));
}
transferTex1.filterMode = cudaFilterModeLinear;
transferTex1.normalized = true; // access with normalized texture coordinates
transferTex1.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors( cudaBindTextureToArray( transferTex1, d_transferFuncArray1, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(cudaUnbindTexture(tex1));
checkCudaErrors(cudaUnbindTexture(transferTex1));
cudaFreeArray(d_transferFuncArray1);
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float dens1, float bright1, float Offset1, float Scale1, float weight1)
{
d_render<<<gridSize, blockSize>>>( d_output, imageW, imageH, dens1, bright1,
Offset1, Scale1, weight1 );
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix, 0, cudaMemcpyHostToDevice) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
_bias.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __SP_FFT_BIAS__
#define __SP_FFT_BIAS__
#include "THH.h"
// _add_bias and _fill_gradBias : code from torch (likely by Sixin)
// blockIdx.x -> d
// threadIdx.x -> (m,n) [+blockDim.x]
// threadIdx.y -> z [+blockDim.y]
static __global__ void _add_bias(const float *bias, float *output,
int batch_n, int output_n, int output_h,
int output_w) {
output += blockIdx.x*output_h*output_w;
float b = bias[blockIdx.x];
int oz,oxy;
for (oz = threadIdx.y; oz < batch_n; oz += blockDim.y) {
float *out = output + oz*output_n*output_h*output_w;
for (oxy = threadIdx.x; oxy < output_h*output_w; oxy += blockDim.x) {
out[oxy] += b;
}
}
}
// ASSUME
// dim3 blocks(nOutputPlane);
// dim3 threads(32,4);
// blockIdx.x -> d
// threadIdx.x -> (m,n) [+blockDim.x]
// threadIdx.y -> z [+blockDim.y]
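// One block per output plane: the 32x4 threads accumulate partial sums of gradOutput over batch and spatial positions into shared memory, then reduce them into a single scaled gradBias entry.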
__global__ void _fill_gradBias(float *gradBias, const float *gradOutput, float scale,
int batch_n, int output_n,
int output_h, int output_w) {
gradOutput += blockIdx.x*output_h*output_w;
__shared__ float shGrad[128]; // 32*4
float g = .0f;
int oz,oxy;
for (oz = threadIdx.y; oz < batch_n; oz += 4) {
const float *out = gradOutput + oz*output_n*output_h*output_w;
for (oxy = threadIdx.x; oxy < output_h*output_w; oxy += 32) {
g += out[oxy];
}
}
shGrad[threadIdx.y*blockDim.x+threadIdx.x] = g;
__syncthreads();
// reduce
if (threadIdx.x == 0) {
g = .0f;
for (oxy = 0; oxy < 128; ++oxy)
g += shGrad[oxy];
gradBias[blockIdx.x] = scale*g;
}
}
#endif | _bias.cu | #ifndef __SP_FFT_BIAS__
#define __SP_FFT_BIAS__
#include "THC.h"
// _add_bias and _fill_gradBias : code from torch (likely by Sixin)
// blockIdx.x -> d
// threadIdx.x -> (m,n) [+blockDim.x]
// threadIdx.y -> z [+blockDim.y]
static __global__ void _add_bias(const float *bias, float *output,
int batch_n, int output_n, int output_h,
int output_w) {
output += blockIdx.x*output_h*output_w;
float b = bias[blockIdx.x];
int oz,oxy;
for (oz = threadIdx.y; oz < batch_n; oz += blockDim.y) {
float *out = output + oz*output_n*output_h*output_w;
for (oxy = threadIdx.x; oxy < output_h*output_w; oxy += blockDim.x) {
out[oxy] += b;
}
}
}
// ASSUME
// dim3 blocks(nOutputPlane);
// dim3 threads(32,4);
// blockIdx.x -> d
// threadIdx.x -> (m,n) [+blockDim.x]
// threadIdx.y -> z [+blockDim.y]
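// One block per output plane: the 32x4 threads accumulate partial sums of gradOutput over batch and spatial positions into shared memory, then reduce them into a single scaled gradBias entry.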
__global__ void _fill_gradBias(float *gradBias, const float *gradOutput, float scale,
int batch_n, int output_n,
int output_h, int output_w) {
gradOutput += blockIdx.x*output_h*output_w;
__shared__ float shGrad[128]; // 32*4
float g = .0f;
int oz,oxy;
for (oz = threadIdx.y; oz < batch_n; oz += 4) {
const float *out = gradOutput + oz*output_n*output_h*output_w;
for (oxy = threadIdx.x; oxy < output_h*output_w; oxy += 32) {
g += out[oxy];
}
}
shGrad[threadIdx.y*blockDim.x+threadIdx.x] = g;
__syncthreads();
// reduce
if (threadIdx.x == 0) {
g = .0f;
for (oxy = 0; oxy < 128; ++oxy)
g += shGrad[oxy];
gradBias[blockIdx.x] = scale*g;
}
}
#endif |
39ac2365283ea2f18a6cff8f8b816a5090f430ee.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<math.h>
int main(int argc, char** argv)
{
float *d_a, *d_b, *d_c;
size_t pitch;
int row = 0;
int i = 4;
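// keep allocating three row x row float matrices per iteration (growing the dimension each time, never freeing on success) until hipMallocPitch fails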
while (1)
{
row = pow(2, i);
if (hipMallocPitch(&d_a, &pitch, row * sizeof(float), row) != hipSuccess)
{
printf("memory failed for 2^%d\n", i);
return 1;
}
if (hipMallocPitch(&d_b, &pitch, row * sizeof(float), row) != hipSuccess)
{
printf("memory failed for 2^%d\n", i);
hipFree(d_a);
return 1;
}
if (hipMallocPitch(&d_c, &pitch, row * sizeof(float), row) != hipSuccess)
{
printf("memory failed for 2^%d\n", i);
hipFree(d_a); hipFree(d_b);
return 1;
}
printf("memory alloted for 2^%d x 2^%d\n", i, i);
++i;
}
}
| 39ac2365283ea2f18a6cff8f8b816a5090f430ee.cu | #include<stdio.h>
#include<math.h>
int main(int argc, char** argv)
{
float *d_a, *d_b, *d_c;
size_t pitch;
int row = 0;
int i = 4;
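// keep allocating three row x row float matrices per iteration (growing the dimension each time, never freeing on success) until cudaMallocPitch fails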
while (1)
{
row = pow(2, i);
if (cudaMallocPitch(&d_a, &pitch, row * sizeof(float), row) != cudaSuccess)
{
printf("memory failed for 2^%d\n", i);
return 1;
}
if (cudaMallocPitch(&d_b, &pitch, row * sizeof(float), row) != cudaSuccess)
{
printf("memory failed for 2^%d\n", i);
cudaFree(d_a);
return 1;
}
if (cudaMallocPitch(&d_c, &pitch, row * sizeof(float), row) != cudaSuccess)
{
printf("memory failed for 2^%d\n", i);
cudaFree(d_a); cudaFree(d_b);
return 1;
}
printf("memory alloted for 2^%d x 2^%d\n", i, i);
++i;
}
}
|
5ae60689a23cc9249d4d26831d2adefbbd17e8a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#define SPDS 15
#define TPB 64
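// D3Q15 pre-collision step: load the 15 distributions into shared memory, compute density and velocity moments, correct inlet/outlet nodes to the prescribed x-velocity, then write the equilibrium distributions fEq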
__global__ void pre_collideD3Q15(float * fEq,float * fIn,
float * ex, float * ey,
float * ez,
float * ux_ip,float * ux_op,
float * w, int * inl,
int * onl, int nnodes){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
__shared__ float fIn_s[TPB][SPDS];
for(int spd=0;spd<SPDS;spd++){
fIn_s[threadIdx.x][spd]=fIn[spd*nnodes+tid];
}
//get macroscopic velocity and density.
float ux = 0.; float uy = 0.; float uz = 0.; float rho = 0.;
float f_tmp; float cu;
for(int spd=0;spd<SPDS; spd++){
f_tmp = fIn_s[threadIdx.x][spd];
rho+=f_tmp;
ux+=ex[spd]*f_tmp;
uy+=ey[spd]*f_tmp;
uz+=ez[spd]*f_tmp;
}
//yes, I know, I should ensure rho is not zero...
ux = ux/rho;
uy = uy/rho;
uz = uz/rho;
//if I'm a boundary node, apply bc.
if(inl[tid]==1){
float dx = ux_ip[tid]-ux;
float dy = -uy;
float dz =-uz;
for(int spd=0;spd<SPDS;spd++){
cu= 3.0*(ex[spd]*dx+ey[spd]*dy+ez[spd]*dz);
fIn_s[threadIdx.x][spd]+=w[spd]*rho*cu;
//write updated fIn back to global memory.
fIn[spd*nnodes+tid]=fIn_s[threadIdx.x][spd];
}
ux +=dx;
uy +=dy;
uz +=dz;
}
if(onl[tid]==1){
float dx = ux_op[tid]-ux;
float dy = -uy;
float dz =-uz;
for(int spd=0;spd<SPDS;spd++){
cu= 3.0*(ex[spd]*dx+ey[spd]*dy+ez[spd]*dz);
fIn_s[threadIdx.x][spd]+=w[spd]*rho*cu;
//write updated fIn back to global memory.
fIn[spd*nnodes+tid]=fIn_s[threadIdx.x][spd];
}
ux +=dx;
uy +=dy;
uz +=dz;
}
//now, compute fEq
for(int spd=0;spd<SPDS;spd++){
cu = 3.0*(ex[spd]*ux+ey[spd]*uy+ez[spd]*uz);
fEq[nnodes*spd+tid]=w[spd]*rho*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+
uy*uy+
uz*uz));
}
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){
if(nrhs!=10)
return err("Usage: pc_Pois_D3Q15_jkt(fIn,fEq,ex,ey,ez,ux_ip,ux_op,w,inl,onl)");
mxArray * m_fIn = prhs[0];
mxArray * m_fEq = prhs[1];
mxArray * m_ex = prhs[2];
mxArray * m_ey = prhs[3];
mxArray * m_ez = prhs[4];
mxArray * m_ux_ip = prhs[5];
mxArray * m_ux_op = prhs[6];
mxArray * m_w = prhs[7];
mxArray * m_inl = prhs[8];
mxArray * m_onl = prhs[9];
mxClassID cls=jkt_class(m_fIn);
const mwSize * dims;
int stat = jkt_dims(m_fIn,&dims);
int nnodes = dims[0];
float * fEq_d;
float * fIn_d;
float * ex_d;
float * ey_d;
float * ez_d;
float * ux_ip_d;
float * ux_op_d;
float * w_d;
int * inl_d;
int * onl_d;
jkt_mem((void**)&fEq_d,m_fEq);
jkt_mem((void**)&fIn_d,m_fIn);
jkt_mem((void**)&ex_d,m_ex);
jkt_mem((void**)&ey_d,m_ey);
jkt_mem((void**)&ez_d,m_ez);
jkt_mem((void**)&w_d,m_w);
jkt_mem((void**)&ux_ip_d,m_ux_ip);
jkt_mem((void**)&ux_op_d,m_ux_op);
jkt_mem((void**)&inl_d,m_inl);
jkt_mem((void**)&onl_d,m_onl);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
hipLaunchKernelGGL(( pre_collideD3Q15), dim3(GRIDS),dim3(BLOCKS), 0, 0, fEq_d,fIn_d,ex_d,ey_d,
ez_d,ux_ip_d,ux_op_d,
w_d,inl_d,onl_d,nnodes);
return errNone;
}
| 5ae60689a23cc9249d4d26831d2adefbbd17e8a1.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#define SPDS 15
#define TPB 64
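// D3Q15 pre-collision step: load the 15 distributions into shared memory, compute density and velocity moments, correct inlet/outlet nodes to the prescribed x-velocity, then write the equilibrium distributions fEq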
__global__ void pre_collideD3Q15(float * fEq,float * fIn,
float * ex, float * ey,
float * ez,
float * ux_ip,float * ux_op,
float * w, int * inl,
int * onl, int nnodes){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
__shared__ float fIn_s[TPB][SPDS];
for(int spd=0;spd<SPDS;spd++){
fIn_s[threadIdx.x][spd]=fIn[spd*nnodes+tid];
}
//get macroscopic velocity and density.
float ux = 0.; float uy = 0.; float uz = 0.; float rho = 0.;
float f_tmp; float cu;
for(int spd=0;spd<SPDS; spd++){
f_tmp = fIn_s[threadIdx.x][spd];
rho+=f_tmp;
ux+=ex[spd]*f_tmp;
uy+=ey[spd]*f_tmp;
uz+=ez[spd]*f_tmp;
}
//yes, I know, I should ensure rho is not zero...
ux = ux/rho;
uy = uy/rho;
uz = uz/rho;
//if I'm a boundary node, apply bc.
if(inl[tid]==1){
float dx = ux_ip[tid]-ux;
float dy = -uy;
float dz =-uz;
for(int spd=0;spd<SPDS;spd++){
cu= 3.0*(ex[spd]*dx+ey[spd]*dy+ez[spd]*dz);
fIn_s[threadIdx.x][spd]+=w[spd]*rho*cu;
//write updated fIn back to global memory.
fIn[spd*nnodes+tid]=fIn_s[threadIdx.x][spd];
}
ux +=dx;
uy +=dy;
uz +=dz;
}
if(onl[tid]==1){
float dx = ux_op[tid]-ux;
float dy = -uy;
float dz =-uz;
for(int spd=0;spd<SPDS;spd++){
cu= 3.0*(ex[spd]*dx+ey[spd]*dy+ez[spd]*dz);
fIn_s[threadIdx.x][spd]+=w[spd]*rho*cu;
//write updated fIn back to global memory.
fIn[spd*nnodes+tid]=fIn_s[threadIdx.x][spd];
}
ux +=dx;
uy +=dy;
uz +=dz;
}
//now, compute fEq
for(int spd=0;spd<SPDS;spd++){
cu = 3.0*(ex[spd]*ux+ey[spd]*uy+ez[spd]*uz);
fEq[nnodes*spd+tid]=w[spd]*rho*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+
uy*uy+
uz*uz));
}
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){
if(nrhs!=10)
return err("Usage: pc_Pois_D3Q15_jkt(fIn,fEq,ex,ey,ez,ux_ip,ux_op,w,inl,onl)");
mxArray * m_fIn = prhs[0];
mxArray * m_fEq = prhs[1];
mxArray * m_ex = prhs[2];
mxArray * m_ey = prhs[3];
mxArray * m_ez = prhs[4];
mxArray * m_ux_ip = prhs[5];
mxArray * m_ux_op = prhs[6];
mxArray * m_w = prhs[7];
mxArray * m_inl = prhs[8];
mxArray * m_onl = prhs[9];
mxClassID cls=jkt_class(m_fIn);
const mwSize * dims;
int stat = jkt_dims(m_fIn,&dims);
int nnodes = dims[0];
float * fEq_d;
float * fIn_d;
float * ex_d;
float * ey_d;
float * ez_d;
float * ux_ip_d;
float * ux_op_d;
float * w_d;
int * inl_d;
int * onl_d;
jkt_mem((void**)&fEq_d,m_fEq);
jkt_mem((void**)&fIn_d,m_fIn);
jkt_mem((void**)&ex_d,m_ex);
jkt_mem((void**)&ey_d,m_ey);
jkt_mem((void**)&ez_d,m_ez);
jkt_mem((void**)&w_d,m_w);
jkt_mem((void**)&ux_ip_d,m_ux_ip);
jkt_mem((void**)&ux_op_d,m_ux_op);
jkt_mem((void**)&inl_d,m_inl);
jkt_mem((void**)&onl_d,m_onl);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
pre_collideD3Q15<<<GRIDS,BLOCKS>>>(fEq_d,fIn_d,ex_d,ey_d,
ez_d,ux_ip_d,ux_op_d,
w_d,inl_d,onl_d,nnodes);
return errNone;
}
|
3cf8523aaae7001571db3834d266dcda559568bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
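// fully unrolled axpy over a 16-element register tile: c[i] += a * b[i] for i = 0..15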
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_T_N_32_32_8_8_8(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose:
========
This routine computes
C = alpha* A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
=============================================================== */
const int ibx = blockIdx.x *32;
const int iby = blockIdx.y *32;
const int tx = threadIdx.y ;
const int ty = threadIdx.x ;
int idt = tx * 8 + ty;
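// threads whose indices fall outside m, n, or k get their A/B offsets and strides clamped to in-bounds addresses; partial tiles are masked later by the switch(lda) store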
if( ty >=k )
A += __mul24(ibx ,lda)+0;
else
A += __mul24(ibx ,lda)+ty;
if( (ibx + tx ) >= m )
A += __mul24(0,lda);
else
A += __mul24(tx,lda);
if( (iby+tx) >=n )
B+= __mul24(iby+0,ldb);
else
B+= __mul24(iby+tx,ldb) ;
if( ty>=k)
B+=0;
else
B+= ty;
C += ibx +idt%32 +__mul24( iby+16*(idt/32),ldc);
lda = lda *8 ;
ldb = ldb *8 ;
int as1=0, as2=lda, as3=2*lda , as4 =3*lda;
int bs1=0 , bs2=ldb , bs3=2*ldb , bs4=3*ldb ;
switch(k){
case 1: as2=0 ; as3 = 0*lda;as4=0; bs2=0 ; bs3 = 0*ldb; bs4=0; break;
case 2: as2=lda ; as3 = 0*lda;as4=0; bs2=ldb ; bs3 = 0*ldb; bs4=0; break;
case 3: as2=lda ; as3 = 2*lda;as4=0; bs2=ldb ; bs3 = 2*ldb; bs4=0; break;
}
if( (ibx + tx ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +8 ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +16) >=m ) { as1=0; as2=1*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +24) >=m ) { as1=0; as2=1*lda; as3=2*lda ; as4 =0*lda; }
if( (iby + tx ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +8 ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +16) >=n ) { bs1=0; bs2=1*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +24) >=n ) { bs1=0; bs2=1*ldb; bs3=2*ldb ; bs4 =0*ldb; }
double b= B[bs1];
double b1=B[bs2];
double b2=B[bs3];
double b3=B[bs4];
double Ap[4]={A[as1], A[as2], A[as3],A[as4]};
const double *Bend = B + (k-k%8);
B+=8;
A+=8;
__shared__ double Bb[8][33];
__shared__ double ABb[32][9];
double Cb[16] = {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0};
const int l = 17*(idt/32) ;
int idt1 = idt ;
idt = idt % 32 ;
if(k>15){
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
daxpy(ABb[idt][0], &Bb[0][l], Cb);Ap[0]=A[as1];
daxpy(ABb[idt][1], &Bb[1][l], Cb);Ap[1]=A[as2];
daxpy(ABb[idt][2], &Bb[2][l], Cb);Ap[2]=A[as3];
daxpy(ABb[idt][3], &Bb[3][l], Cb);Ap[3]=A[as4];
daxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
daxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
daxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
daxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if(k>7){
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k-k%8;
if(as1+ty>=k){ bs1=0*ldb;bs2=0*ldb;bs3=0*ldb;bs4=0*ldb;B-=8;}
if(as1+ty>=k){ as1=0*lda;as2=0*lda;as3=0*lda;as4=0*lda;A-=8;}
as1=0;
daxpy(ABb[idt][0], &Bb[0][l], Cb);
Ap[0]=A[as1];
daxpy(ABb[idt][1], &Bb[1][l], Cb);
Ap[1]=A[as2];
daxpy(ABb[idt][2], &Bb[2][l], Cb);
Ap[2]=A[as3];
daxpy(ABb[idt][3], &Bb[3][l], Cb);
Ap[3]=A[as4];
daxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
daxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
daxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
daxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
}
k=k%8;
if ( k!=0){
__syncthreads();
Bb[ty][tx]= b;
Bb[ty][tx+8] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx][ty]= Ap[0];
ABb[tx+8][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0;i<k;i++){
daxpy(ABb[idt][i],&Bb[i][l], Cb);
}
}
if( (iby+16*(idt1/32+1))>=n) {
lda = n-iby-16*(idt1/32);
}
else {
lda = 16;
}
if( (ibx+idt) >= m )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc];
break;
case 15:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
break;
case 14:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
break;
case 13:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
break;
case 12:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
break;
case 11:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_kernel_T_N_32_32_8_8_8(double *C,
const double *A,
const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 8, 8 );
dim3 grid(m/32+(m%32!=0),n/32+(n%32!=0));
hipLaunchKernelGGL(( dgemm_kernel_T_N_32_32_8_8_8), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
| 3cf8523aaae7001571db3834d266dcda559568bd.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
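// fully unrolled axpy over a 16-element register tile: c[i] += a * b[i] for i = 0..15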
static __device__ void daxpy(double a,double *b, double *c) {
c[0] += a * b[0];
c[1] += a * b[1];
c[2] += a * b[2];
c[3] += a * b[3];
c[4] += a * b[4];
c[5] += a * b[5];
c[6] += a * b[6];
c[7] += a * b[7];
c[8] += a * b[8];
c[9] += a * b[9];
c[10] += a * b[10];
c[11] += a * b[11];
c[12] += a * b[12];
c[13] += a * b[13];
c[14] += a * b[14];
c[15] += a * b[15];
}
__global__ void
dgemm_kernel_T_N_32_32_8_8_8(double *C, const double *A, const double *B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta)
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose:
========
This routine computes
C = alpha* A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
=============================================================== */
const int ibx = blockIdx.x *32;
const int iby = blockIdx.y *32;
const int tx = threadIdx.y ;
const int ty = threadIdx.x ;
int idt = tx * 8 + ty;
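// threads whose indices fall outside m, n, or k get their A/B offsets and strides clamped to in-bounds addresses; partial tiles are masked later by the switch(lda) store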
if( ty >=k )
A += __mul24(ibx ,lda)+0;
else
A += __mul24(ibx ,lda)+ty;
if( (ibx + tx ) >= m )
A += __mul24(0,lda);
else
A += __mul24(tx,lda);
if( (iby+tx) >=n )
B+= __mul24(iby+0,ldb);
else
B+= __mul24(iby+tx,ldb) ;
if( ty>=k)
B+=0;
else
B+= ty;
C += ibx +idt%32 +__mul24( iby+16*(idt/32),ldc);
lda = lda *8 ;
ldb = ldb *8 ;
int as1=0, as2=lda, as3=2*lda , as4 =3*lda;
int bs1=0 , bs2=ldb , bs3=2*ldb , bs4=3*ldb ;
switch(k){
case 1: as2=0 ; as3 = 0*lda;as4=0; bs2=0 ; bs3 = 0*ldb; bs4=0; break;
case 2: as2=lda ; as3 = 0*lda;as4=0; bs2=ldb ; bs3 = 0*ldb; bs4=0; break;
case 3: as2=lda ; as3 = 2*lda;as4=0; bs2=ldb ; bs3 = 2*ldb; bs4=0; break;
}
if( (ibx + tx ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +8 ) >=m ) { as1=0; as2=0*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +16) >=m ) { as1=0; as2=1*lda; as3=0*lda ; as4 =0*lda; } else
if( (ibx + tx +24) >=m ) { as1=0; as2=1*lda; as3=2*lda ; as4 =0*lda; }
if( (iby + tx ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +8 ) >=n ) { bs1=0; bs2=0*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +16) >=n ) { bs1=0; bs2=1*ldb; bs3=0*ldb ; bs4 =0*ldb; } else
if( (iby + tx +24) >=n ) { bs1=0; bs2=1*ldb; bs3=2*ldb ; bs4 =0*ldb; }
double b= B[bs1];
double b1=B[bs2];
double b2=B[bs3];
double b3=B[bs4];
double Ap[4]={A[as1], A[as2], A[as3],A[as4]};
const double *Bend = B + (k-k%8);
B+=8;
A+=8;
__shared__ double Bb[8][33];
__shared__ double ABb[32][9];
double Cb[16] = {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0};
const int l = 17*(idt/32) ;
int idt1 = idt ;
idt = idt % 32 ;
if(k>15){
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
daxpy(ABb[idt][0], &Bb[0][l], Cb);Ap[0]=A[as1];
daxpy(ABb[idt][1], &Bb[1][l], Cb);Ap[1]=A[as2];
daxpy(ABb[idt][2], &Bb[2][l], Cb);Ap[2]=A[as3];
daxpy(ABb[idt][3], &Bb[3][l], Cb);Ap[3]=A[as4];
daxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
daxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
daxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
daxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if(k>7){
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k-k%8;
if(as1+ty>=k){ bs1=0*ldb;bs2=0*ldb;bs3=0*ldb;bs4=0*ldb;B-=8;}
if(as1+ty>=k){ as1=0*lda;as2=0*lda;as3=0*lda;as4=0*lda;A-=8;}
as1=0;
daxpy(ABb[idt][0], &Bb[0][l], Cb);
Ap[0]=A[as1];
daxpy(ABb[idt][1], &Bb[1][l], Cb);
Ap[1]=A[as2];
daxpy(ABb[idt][2], &Bb[2][l], Cb);
Ap[2]=A[as3];
daxpy(ABb[idt][3], &Bb[3][l], Cb);
Ap[3]=A[as4];
daxpy(ABb[idt][4], &Bb[4][l], Cb);
b=B[bs1];
daxpy(ABb[idt][5], &Bb[5][l], Cb);
b1=B[bs2];
daxpy(ABb[idt][6], &Bb[6][l], Cb);
b2=B[bs3];
daxpy(ABb[idt][7], &Bb[7][l], Cb);
b3=B[bs4];
}
k=k%8;
if ( k!=0){
__syncthreads();
Bb[ty][tx]= b;
Bb[ty][tx+8] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx][ty]= Ap[0];
ABb[tx+8][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0;i<k;i++){
daxpy(ABb[idt][i],&Bb[i][l], Cb);
}
}
if( (iby+16*(idt1/32+1))>=n) {
lda = n-iby-16*(idt1/32);
}
else {
lda = 16;
}
if( (ibx+idt) >= m )
lda = 0 ;
else lda = lda ;
switch(lda){
case 16:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
C[15*ldc] =alpha*Cb[15] + beta * C[15*ldc];
break;
case 15:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
C[14*ldc] =alpha*Cb[14] + beta * C[14*ldc];
break;
case 14:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
C[13*ldc] =alpha*Cb[13] + beta * C[13*ldc];
break;
case 13:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
C[12*ldc] =alpha*Cb[12] + beta * C[12*ldc];
break;
case 12:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
C[11*ldc] =alpha*Cb[11] + beta * C[11*ldc];
break;
case 11:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
C[10*ldc] =alpha*Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
C[9*ldc] =alpha*Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
C[8*ldc] =alpha*Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
C[7*ldc] =alpha*Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
C[6*ldc] =alpha*Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
C[5*ldc] =alpha*Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
C[4*ldc] =alpha*Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
C[3*ldc] =alpha*Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
C[2*ldc] =alpha*Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0] =alpha*Cb[0] + beta * C[0];
C[1*ldc] =alpha*Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0] =alpha*Cb[0] + beta * C[0];
break;
case 0:
break;
}
}
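// Host-side launcher: 8x8 threads per block, each block computing one 32x32 tile
// of C; the grid rounds m and n up to multiples of 32, and the switch in the
// epilogue above handles the partial tiles at the matrix edge.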
extern "C" void
magmablas_dgemm_kernel_T_N_32_32_8_8_8(double *C,
const double *A,
const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta)
{
dim3 threads( 8, 8 );
dim3 grid(m/32+(m%32!=0),n/32+(n%32!=0));
dgemm_kernel_T_N_32_32_8_8_8<<< grid, threads, 0, magma_stream >>>(C, A, B,
m, n, k,
lda, ldb, ldc,
alpha, beta);
}
|
14e39a10a7c6ffb4c558369e96c35f8b9e220732.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void box_iou_cuda_kernel(float * box_iou, float4 * box1, float4 * box2, long M, long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2, x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i=idx; i<M*N; i+=idxJump){
b1_idx = i/N;
b2_idx = i%N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1,xmin2);
y_tl = fmaxf(ymin1,ymin2);
x_br = fminf(xmax1,xmax2);
y_br = fminf(ymax1,ymax2);
w = (x_br - x_tl + 1)<0 ? 0.0f:(x_br - x_tl + 1);
h = (y_br - y_tl + 1)<0 ? 0.0f:(y_br - y_tl + 1);
inter = w*h;
area1 = (xmax1-xmin1+1)*(ymax1-ymin1+1);
area2 = (xmax2-xmin2+1)*(ymax2-ymin2+1);
iou = inter/(area1+area2-inter);
box_iou[b1_idx*N+b2_idx]=iou;
}
} | 14e39a10a7c6ffb4c558369e96c35f8b9e220732.cu | #include "includes.h"
__global__ void box_iou_cuda_kernel(float * box_iou, float4 * box1, float4 * box2, long M, long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2, x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i=idx; i<M*N; i+=idxJump){
b1_idx = i/N;
b2_idx = i%N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1,xmin2);
y_tl = fmaxf(ymin1,ymin2);
x_br = fminf(xmax1,xmax2);
y_br = fminf(ymax1,ymax2);
w = (x_br - x_tl + 1)<0 ? 0.0f:(x_br - x_tl + 1);
h = (y_br - y_tl + 1)<0 ? 0.0f:(y_br - y_tl + 1);
inter = w*h;
area1 = (xmax1-xmin1+1)*(ymax1-ymin1+1);
area2 = (xmax2-xmin2+1)*(ymax2-ymin2+1);
iou = inter/(area1+area2-inter);
box_iou[b1_idx*N+b2_idx]=iou;
}
} |
fdadd0e7d46dc3ccb29d0aff72b16a0a306f6e93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define N 10000000
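// 256-bin histogram of N bytes: each block accumulates a private copy in shared
// memory, then merges it into the global histogram with one atomicAdd per bin.
// Init and merge index by threadIdx.x, so the kernel assumes 256 threads per block.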
__global__ void compute_histogram(unsigned char *data, unsigned int *histogram)
{
__shared__ unsigned int cache[256];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[threadIdx.x] = 0;
__syncthreads();
while(i < N)
{
atomicAdd(&cache[data[i]], 1);
i += blockDim.x * gridDim.x;
}
__syncthreads();
atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]);
} | fdadd0e7d46dc3ccb29d0aff72b16a0a306f6e93.cu | #include "includes.h"
#define N 10000000
__global__ void compute_histogram(unsigned char *data, unsigned int *histogram)
{
__shared__ unsigned int cache[256];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[threadIdx.x] = 0;
__syncthreads();
while(i < N)
{
atomicAdd(&cache[data[i]], 1);
i += blockDim.x * gridDim.x;
}
__syncthreads();
atomicAdd(&histogram[threadIdx.x], cache[threadIdx.x]);
} |
7f606f70a56784578a86a8569aeb2bfd8942607e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <hip/hip_runtime.h> // cuda/6.5
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/** Hardcoded Variables **/
#define WMAX 10.0
#define RADIUS 6.0
#define WINDOW_DISTANCE 10.0
/** Used for vector indexing **/
#define X 0
#define Y 1
#define Z 2
#define THREADSPERBLOCK 128
__global__ void ray_thread(double *G, int *n, double *wmax, double *r, double *L, double *c);
__device__ double dot3(double * vec1, double * vec2);
__device__ double mag3(double * vec);
__device__ void scale3(double scalar, double * in_vec, double * out);
__device__ void subvec3(double * vec1, double * vec2, double * diff);
__device__ int gridindex(double * vec, int grid_dim, double window_dim);
__device__ void sample_vec(double * vec, uint64_t * seed);
__device__ double LCG_random_double(uint64_t * seed);
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n);
static __inline__ __device__ double atomicAdd(double *address, double val);
int main(int argc, char ** args){
// arg check
if (argc != 3){
printf("---Incorrect Arguments---\nProgram should be run with the following:\n EXECUTABLE GRIDDIMENSION NUMBER-OF-RAYS\n");
return EXIT_SUCCESS;
}
// Initialize timer
clock_t stopwatch;
// Start time
stopwatch = clock();
// user-defined number of rays
int num_rays = atoi(args[2]);
// Calculate number of blocks based on threads per and total rays
int blocks = (num_rays + THREADSPERBLOCK - 1) / THREADSPERBLOCK;
// Below will all be passed to kernel
// user-defined grid dimension
int n = atoi(args[1]);
// Hardcoded variables
double wmax = WMAX;
double r = RADIUS;
double L[3] = {4.0, 4.0, -1.0};
double c[3] = {0.0, 12.0, 0.0};
// Allocate space for device variables
// grid dimension
int *d_n;
hipMalloc((void **)&d_n, sizeof(int));
hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
// window dimension
double *d_wmax;
hipMalloc((void **)&d_wmax, sizeof(double));
hipMemcpy(d_wmax, &wmax, sizeof(double), hipMemcpyHostToDevice);
// radius
double *d_r;
hipMalloc((void **)&d_r, sizeof(double));
hipMemcpy(d_r, &r, sizeof(double), hipMemcpyHostToDevice);
// light source coordinates
double *d_L;
hipMalloc((void **)&d_L, 3 * sizeof(double));
hipMemcpy(d_L, L, 3 * sizeof(double), hipMemcpyHostToDevice);
// sphere coordinates
double *d_c;
hipMalloc((void **)&d_c, 3 * sizeof(double));
hipMemcpy(d_c, c, 3 * sizeof(double), hipMemcpyHostToDevice);
// Host and device grids
double *h_G;
double *d_G;
// allocate host memory for the grid and initialize to 0
h_G = (double *) calloc(n * n, sizeof(double)); // calloc initializes allocated memory to 0
// allocate device memory
hipMalloc((void**)&d_G, n * n * sizeof(double));
// Set CUDA memory to 0
hipMemset(d_G, 0, n * n * sizeof(double));
// Initialize the kernel and run GPU code
hipLaunchKernelGGL(( ray_thread), dim3(blocks),dim3(THREADSPERBLOCK), 0, 0, d_G, d_n, d_wmax, d_r, d_L, d_c);
// Copy device grid back to host
hipMemcpy(h_G, d_G, n * n * sizeof(double), hipMemcpyDeviceToHost);
// Write data to file
FILE * f0 = fopen("data.bin", "wb");
fwrite(h_G, sizeof(double), n * n, f0);
// Close file and free memory
fclose(f0);
free(h_G);
hipFree(d_n);
hipFree(d_wmax);
hipFree(d_r);
hipFree(d_L);
hipFree(d_c);
hipFree(d_G);
// Calculate elapsed time
double elapsed = ((double) (clock() - stopwatch)) / CLOCKS_PER_SEC;
// Print total runtime
printf("Total execution time: %.2f seconds\n", elapsed);
return EXIT_SUCCESS;
}
/*** Helper Functions ***/
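// One thread per ray: directions are resampled until the ray both lands inside the
// window (|x|, |z| < wmax at y = WINDOW_DISTANCE) and actually intersects the sphere
// (t_test > 0). The hit point is shaded with a Lambertian term max(0, S.N) and the
// brightness is atomically accumulated into the window grid cell the ray passed through.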
__global__ void ray_thread(double *G, int *n, double *wmax, double *r, double *L, double *c){
// Undefined Variable Declarations
// Declared here so each thread has own copy
double v[3];
double I[3];
double N_pt1[3];
double N[3];
double S_pt1[3];
double S[3];
double t_test = 1.0; // dummy initial value to avoid triggering conditional erroneously
double t, scalar, brightness;
// Thread ID
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// Seed random number generator
uint64_t seed = 424242;
// Fast forward PRNG based on thread id
seed = fast_forward_LCG(seed, 200 * tid);
// Define window intersection outside acceptable bounds
double w[3] = {*wmax + 1, WINDOW_DISTANCE, *wmax + 1};
while ( fabs(w[X]) >= *wmax || fabs(w[Z]) >= *wmax || t_test <= 0 ){
// Randomly select a new ray and store values in v
sample_vec(v, &seed);
// Calculate scalar value
scalar = w[Y]/v[Y];
// Calculate scalar w from vector v
scale3( scalar, v, w );
// Calculate t component necessary for realness test
t_test = pow(dot3(v, c), 2) + (*r * *r) - dot3(c, c);
}
t = dot3(v, c) - sqrt(t_test);
scale3( t, v, I);
// I - c stored in N_pt1
subvec3(I, c, N_pt1);
// Divide difference by magnitude of difference; store in N
scale3( 1/mag3(N_pt1), N_pt1, N);
// L - I stored in S_pt1
subvec3(L, I, S_pt1);
// Divide difference by magnitude of difference; store in S
scale3( 1/mag3(S_pt1), S_pt1, S);
// Calculate brightness; if brightness < 0, use 0
brightness = fmax(0, dot3(S, N));
int index = gridindex(w, *n, *wmax);
atomicAdd( &G[index], brightness);
}
// Calculates 3d dot product
__device__ double dot3(double * vec1, double * vec2){
double out = 0;
for (int i = 0; i < 3; i++){
out = out + (vec1[i] * vec2[i]);
}
return out;
}
// Calculates magnitude of 3d vector
__device__ double mag3(double * vec){
double out = sqrt( (vec[0] * vec[0]) + (vec[1] * vec[1]) + (vec[2] * vec[2]) );
return out;
}
// Calculates scalar from vector and scalar value
__device__ void scale3(double scalar, double * in_vec, double * out){
for (int i = 0; i < 3; i++){
out[i] = scalar * in_vec[i];
}
}
// Perform vector subtraction
__device__ void subvec3(double * vec1, double * vec2, double * diff){
for (int i = 0; i < 3; i++){
diff[i] = vec1[i] - vec2[i];
}
}
// Convert window position to grid index
__device__ int gridindex(double * vec, int grid_dim, double window_dim){
// Correct offset to make positive
vec[X] = vec[X] + window_dim;
vec[Z] = vec[Z] + window_dim;
// Scale between grid dimension and window dimension
double ratio = grid_dim / (2 * window_dim);
// Scale window dimension to grid dimension
int row = vec[X] * ratio;
int column = vec[Z] * ratio;
// NOTE: column is now correct; row is flipped
// Row n is row zero and row zero is row n
// Below corrects
row = abs(row - grid_dim);
return (row * grid_dim) + column;
}
// Randomly sample rays
__device__ void sample_vec(double * vec, uint64_t * seed){
double phi = LCG_random_double(seed) * 2 * M_PI;
double cos_theta = -1.0 + (2.0 * LCG_random_double(seed));
double sin_theta = sqrt(1 - pow(cos_theta, 2));
vec[X] = sin_theta * cos(phi);
vec[Y] = sin_theta * sin(phi);
vec[Z] = cos_theta;
}
// PRNG
__device__ double LCG_random_double(uint64_t * seed){
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
// update seed
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
// Fast forward PRNG
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n){
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while (n > 0){
if ( n & 1 ){
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
// Atomic addition function
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
| 7f606f70a56784578a86a8569aeb2bfd8942607e.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <cuda.h> // cuda/6.5
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/** Hardcoded Variables **/
#define WMAX 10.0
#define RADIUS 6.0
#define WINDOW_DISTANCE 10.0
/** Used for vector indexing **/
#define X 0
#define Y 1
#define Z 2
#define THREADSPERBLOCK 128
__global__ void ray_thread(double *G, int *n, double *wmax, double *r, double *L, double *c);
__device__ double dot3(double * vec1, double * vec2);
__device__ double mag3(double * vec);
__device__ void scale3(double scalar, double * in_vec, double * out);
__device__ void subvec3(double * vec1, double * vec2, double * diff);
__device__ int gridindex(double * vec, int grid_dim, double window_dim);
__device__ void sample_vec(double * vec, uint64_t * seed);
__device__ double LCG_random_double(uint64_t * seed);
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n);
static __inline__ __device__ double atomicAdd(double *address, double val);
int main(int argc, char ** args){
// arg check
if (argc != 3){
printf("---Incorrect Arguments---\nProgram should be run with the following:\n EXECUTABLE GRIDDIMENSION NUMBER-OF-RAYS\n");
return EXIT_SUCCESS;
}
// Initialize timer
clock_t stopwatch;
// Start time
stopwatch = clock();
// user-defined number of rays
int num_rays = atoi(args[2]);
// Calculate number of blocks based on threads per and total rays
int blocks = (num_rays + THREADSPERBLOCK - 1) / THREADSPERBLOCK;
// Below will all be passed to kernel
// user-defined grid dimension
int n = atoi(args[1]);
// Hardcoded variables
double wmax = WMAX;
double r = RADIUS;
double L[3] = {4.0, 4.0, -1.0};
double c[3] = {0.0, 12.0, 0.0};
// Allocate space for device variables
// grid dimension
int *d_n;
cudaMalloc((void **)&d_n, sizeof(int));
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
// window dimension
double *d_wmax;
cudaMalloc((void **)&d_wmax, sizeof(double));
cudaMemcpy(d_wmax, &wmax, sizeof(double), cudaMemcpyHostToDevice);
// radius
double *d_r;
cudaMalloc((void **)&d_r, sizeof(double));
cudaMemcpy(d_r, &r, sizeof(double), cudaMemcpyHostToDevice);
// light source coordinates
double *d_L;
cudaMalloc((void **)&d_L, 3 * sizeof(double));
cudaMemcpy(d_L, L, 3 * sizeof(double), cudaMemcpyHostToDevice);
// sphere coordinates
double *d_c;
cudaMalloc((void **)&d_c, 3 * sizeof(double));
cudaMemcpy(d_c, c, 3 * sizeof(double), cudaMemcpyHostToDevice);
// Host and device grids
double *h_G;
double *d_G;
// allocate host memory for the grid and initialize to 0
h_G = (double *) calloc(n * n, sizeof(double)); // calloc initializes allocated memory to 0
// allocate device memory
cudaMalloc((void**)&d_G, n * n * sizeof(double));
// Set CUDA memory to 0
cudaMemset(d_G, 0, n * n * sizeof(double));
// Initialize the kernel and run GPU code
ray_thread<<<blocks,THREADSPERBLOCK>>>(d_G, d_n, d_wmax, d_r, d_L, d_c);
// Copy device grid back to host
cudaMemcpy(h_G, d_G, n * n * sizeof(double), cudaMemcpyDeviceToHost);
// Write data to file
FILE * f0 = fopen("data.bin", "wb");
fwrite(h_G, sizeof(double), n * n, f0);
// Close file and free memory
fclose(f0);
free(h_G);
cudaFree(d_n);
cudaFree(d_wmax);
cudaFree(d_r);
cudaFree(d_L);
cudaFree(d_c);
cudaFree(d_G);
// Calculate elapsed time
double elapsed = ((double) (clock() - stopwatch)) / CLOCKS_PER_SEC;
// Print total runtime
printf("Total execution time: %.2f seconds\n", elapsed);
return EXIT_SUCCESS;
}
/*** Helper Functions ***/
__global__ void ray_thread(double *G, int *n, double *wmax, double *r, double *L, double *c){
// Undefined Variable Declarations
// Declared here so each thread has own copy
double v[3];
double I[3];
double N_pt1[3];
double N[3];
double S_pt1[3];
double S[3];
double t_test = 1.0; // dummy initial value to avoid triggering conditional erroneously
double t, scalar, brightness;
// Thread ID
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// Seed random number generator
uint64_t seed = 424242;
// Fast forward PRNG based on thread id
seed = fast_forward_LCG(seed, 200 * tid);
// Define window intersection outside acceptable bounds
double w[3] = {*wmax + 1, WINDOW_DISTANCE, *wmax + 1};
while ( fabs(w[X]) >= *wmax || fabs(w[Z]) >= *wmax || t_test <= 0 ){
// Randomly select a new ray and store values in v
sample_vec(v, &seed);
// Calculate scalar value
scalar = w[Y]/v[Y];
// Calculate scalar w from vector v
scale3( scalar, v, w );
// Calculate t component necessary for realness test
t_test = pow(dot3(v, c), 2) + (*r * *r) - dot3(c, c);
}
t = dot3(v, c) - sqrt(t_test);
scale3( t, v, I);
// I - c stored in N_pt1
subvec3(I, c, N_pt1);
// Divide difference by magnitude of difference; store in N
scale3( 1/mag3(N_pt1), N_pt1, N);
// L - I stored in S_pt1
subvec3(L, I, S_pt1);
// Divide difference by magnitude of difference; store in S
scale3( 1/mag3(S_pt1), S_pt1, S);
// Calculate brightness; if brightness < 0, use 0
brightness = fmax(0, dot3(S, N));
int index = gridindex(w, *n, *wmax);
atomicAdd( &G[index], brightness);
}
// Calculates 3d dot product
__device__ double dot3(double * vec1, double * vec2){
double out = 0;
for (int i = 0; i < 3; i++){
out = out + (vec1[i] * vec2[i]);
}
return out;
}
// Calculates magnitude of 3d vector
__device__ double mag3(double * vec){
double out = sqrt( (vec[0] * vec[0]) + (vec[1] * vec[1]) + (vec[2] * vec[2]) );
return out;
}
// Calculates scalar from vector and scalar value
__device__ void scale3(double scalar, double * in_vec, double * out){
for (int i = 0; i < 3; i++){
out[i] = scalar * in_vec[i];
}
}
// Perform vector subtraction
__device__ void subvec3(double * vec1, double * vec2, double * diff){
for (int i = 0; i < 3; i++){
diff[i] = vec1[i] - vec2[i];
}
}
// Convert window position to grid index
__device__ int gridindex(double * vec, int grid_dim, double window_dim){
// Correct offset to make positive
vec[X] = vec[X] + window_dim;
vec[Z] = vec[Z] + window_dim;
// Scale between grid dimension and window dimension
double ratio = grid_dim / (2 * window_dim);
// Scale window dimension to grid dimension
int row = vec[X] * ratio;
int column = vec[Z] * ratio;
// NOTE: column is now correct; row is flipped
// Row n is row zero and row zero is row n
// Below corrects
row = abs(row - grid_dim);
return (row * grid_dim) + column;
}
// Randomly sample rays
__device__ void sample_vec(double * vec, uint64_t * seed){
double phi = LCG_random_double(seed) * 2 * M_PI;
double cos_theta = -1.0 + (2.0 * LCG_random_double(seed));
double sin_theta = sqrt(1 - pow(cos_theta, 2));
vec[X] = sin_theta * cos(phi);
vec[Y] = sin_theta * sin(phi);
vec[Z] = cos_theta;
}
// PRNG
__device__ double LCG_random_double(uint64_t * seed){
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
// update seed
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
// Fast forward PRNG
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n){
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while (n > 0){
if ( n & 1 ){
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
// Atomic addition function
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
if (val==0.0)
return __longlong_as_double(old);
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
|
2ccc53e3714f36ed7cf1f259ae0560c84d190f1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
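// The grid-stride loop lets any grid size cover all n elements.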
__global__ void add(int n, float* x, float* y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void) {
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 2ccc53e3714f36ed7cf1f259ae0560c84d190f1e.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__ void add(int n, float* x, float* y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void) {
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// Run kernel on 1M elements on the GPU
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
ce8ac6ba968ee1b12de3c89b1c23c67b697a5f3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fast_mean_delta_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
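// Micro-benchmark harness: for every matrix size and block shape, the kernel gets one
// initial launch plus a 10-launch warm-up, then 1000 launches are timed and the elapsed
// microseconds are printed per (block, matrix) pair. Note the timed loop is not followed
// by a device synchronize, so it largely measures launch/queueing overhead.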
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *delta = NULL;
hipMalloc(&delta, XSIZE*YSIZE);
float *variance = NULL;
hipMalloc(&variance, XSIZE*YSIZE);
int batch = 2;
int filters = 2;
int spatial = 2;
float *mean_delta = NULL;
hipMalloc(&mean_delta, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(fast_mean_delta_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(fast_mean_delta_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(fast_mean_delta_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, delta, variance, batch, filters, spatial, mean_delta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ce8ac6ba968ee1b12de3c89b1c23c67b697a5f3e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fast_mean_delta_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *delta = NULL;
cudaMalloc(&delta, XSIZE*YSIZE);
float *variance = NULL;
cudaMalloc(&variance, XSIZE*YSIZE);
int batch = 2;
int filters = 2;
int spatial = 2;
float *mean_delta = NULL;
cudaMalloc(&mean_delta, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
fast_mean_delta_kernel<<<gridBlock,threadBlock>>>(delta,variance,batch,filters,spatial,mean_delta);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
fast_mean_delta_kernel<<<gridBlock,threadBlock>>>(delta,variance,batch,filters,spatial,mean_delta);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
fast_mean_delta_kernel<<<gridBlock,threadBlock>>>(delta,variance,batch,filters,spatial,mean_delta);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e6027d08be511e83e5df32be6c9307c76fbd5959.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define Block_size 32
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
const int INF = 1000000000;
void input(char *inFileName);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
__global__ void cal(int* Dist, int B, int Dist_width, int Round, int par_x, int par_y, int phase);
int n, m;
int **Dist;
int *device_Dist[2] = {NULL , NULL}; // GPU image array
int main(int argc, char* argv[]) {
input(argv[1]);
int B = Block_size;
block_FW(B);
output(argv[2]);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
int round = ceil(n, Block_size) * Block_size; //memset change to other method
Dist = (int**)malloc(sizeof(int*) * round);
for(int i = 0;i < round;i++){
Dist[i] = (int*)malloc(sizeof(int) * round);
}
for (int i = 0; i < round; ++ i) {
for (int j = 0; j < round; ++ j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char *outFileName) {
FILE *outfile = fopen(outFileName, "wb"); // binary mode, matching the "rb" used for input and the fwrite below
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF)
Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) {
return (a + b - 1) / b;
}
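// Blocked Floyd-Warshall split across two GPUs: block-rows [0, split) live on device 0
// and [split, round) on device 1. In round r the owner of the pivot row runs phase 1
// (pivot block) and phase 2 (pivot block-row), the updated block-row is copied to the
// other GPU with hipMemcpyPeer, and both devices then run phase 2 (pivot column) and
// phase 3 (remaining blocks) on their own halves.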
void block_FW(int B) {
int round = ceil(n, B);
int split = (int)(round / 2) + 1;
hipSetDevice(0);
hipMalloc((void **)&device_Dist[0], (size_t)(round * B * round * B * sizeof(int)));
for(int i = 0;i < split * B;i++){
hipMemcpy(device_Dist[0] + (i * round * B), Dist[i], (size_t)(round * B * sizeof(int)), hipMemcpyHostToDevice);
}
hipSetDevice(1);
hipMalloc((void **)&device_Dist[1], (size_t)(round * B * round * B * sizeof(int)));
for(int i = split * B;i < round * B;i++){
hipMemcpy(device_Dist[1] + (i * round * B), Dist[i], (size_t)(round * B * sizeof(int)), hipMemcpyHostToDevice);
}
dim3 grid1(1, 1); dim3 grid20(1, round);
dim3 grid21_0(split, 1); dim3 grid3_0(split, round);
dim3 grid21_1(round - split, 1); dim3 grid3_1(round - split, round);
dim3 block(Block_size , Block_size);
for (int r = 0; r < round; ++r) {
int x = (r < split)? 0 : 1;
hipSetDevice(x);
/* Phase 1*/
hipLaunchKernelGGL(( cal), dim3(grid1) , dim3(block), 0, 0, device_Dist[x], B, round * B, r, r, r, 1);
/* Phase 2 row*/
hipLaunchKernelGGL(( cal), dim3(grid20) , dim3(block), 0, 0, device_Dist[x], B, round * B, r, r, 0, 20);
hipMemcpyPeer((void*)(device_Dist[1 - x] + (r * B * round * B)), 1 - x, (void*)(device_Dist[x] + (r * B * round * B)), x, (size_t)(B * round * B * sizeof(int)));
hipSetDevice(0);
/* Phase 2 col*/
hipLaunchKernelGGL(( cal), dim3(grid21_0) , dim3(block), 0, 0, device_Dist[0], B, round * B, r, 0, r, 21);
/* Phase 3*/
hipLaunchKernelGGL(( cal), dim3(grid3_0) , dim3(block), 0, 0, device_Dist[0], B, round * B, r, 0, 0, 3);
hipSetDevice(1);
/* Phase 2 col*/
hipLaunchKernelGGL(( cal), dim3(grid21_1) , dim3(block), 0, 0, device_Dist[1], B, round * B, r, split, r, 21);
/* Phase 3*/
hipLaunchKernelGGL(( cal), dim3(grid3_1) , dim3(block), 0, 0, device_Dist[1], B, round * B, r, split, 0, 3);
}
hipSetDevice(0);
for(int i = 0;i < split * B;i++){
hipMemcpy(Dist[i], device_Dist[0] + (i * round * B), (size_t)(n * sizeof(int)), hipMemcpyDeviceToHost);
}
hipSetDevice(1);
for(int i = split * B;i < n;i++){
hipMemcpy(Dist[i], device_Dist[1] + (i * round * B), (size_t)(n * sizeof(int)), hipMemcpyDeviceToHost);
}
}
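// 'phase' selects which part of round r a block updates:
// 1 = pivot block, 20 = pivot block-row, 21 = pivot block-column, 3 = all remaining blocks.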
__global__ void cal(int* Dist, int B, int Dist_width, int Round, int par_x, int par_y, int phase) {
__shared__ int i_k[Block_size][Block_size];
__shared__ int k_j[Block_size][Block_size];
__shared__ int i_j[Block_size][Block_size];
int real_i = par_x * B + blockIdx.x * B;
int real_j = par_y * B + blockIdx.y * B;
if(phase == 20 && real_j == Round * B){
return;
}else if(phase == 21 && real_i == Round * B){
return;
}else if(phase == 3 && (real_i == Round * B || real_j == Round * B)){
return;
}
real_i += threadIdx.y;
real_j += threadIdx.x;
int i = threadIdx.y , j = threadIdx.x , k;
i_k[i][j] = Dist[real_i * Dist_width + Round * B + j];
k_j[i][j] = Dist[(Round * B + i) * Dist_width + real_j];
i_j[i][j] = Dist[real_i * Dist_width + real_j];
__syncthreads();
if(phase == 1){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
i_k[i][j] = i_j[i][j];
k_j[i][j] = i_j[i][j];
}
__syncthreads();
}
}else if(phase == 20){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
k_j[i][j] = i_j[i][j];
}
__syncthreads();
}
}else if(phase == 21){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
i_k[i][j] = i_j[i][j];
}
__syncthreads();
}
}else{
#pragma unroll
for(k = 0; k < Block_size; k++)
i_j[i][j] = min(i_j[i][j], i_k[i][k] + k_j[k][j]);
}
Dist[real_i * Dist_width + real_j] = i_j[i][j];
}
| e6027d08be511e83e5df32be6c9307c76fbd5959.cu | #define Block_size 32
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
const int INF = 1000000000;
void input(char *inFileName);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
__global__ void cal(int* Dist, int B, int Dist_width, int Round, int par_x, int par_y, int phase);
int n, m;
int **Dist;
int *device_Dist[2] = {NULL , NULL}; // GPU image array
int main(int argc, char* argv[]) {
input(argv[1]);
int B = Block_size;
block_FW(B);
output(argv[2]);
return 0;
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
int round = ceil(n, Block_size) * Block_size; //memset change to other method
Dist = (int**)malloc(sizeof(int*) * round);
for(int i = 0;i < round;i++){
Dist[i] = (int*)malloc(sizeof(int) * round);
}
for (int i = 0; i < round; ++ i) {
for (int j = 0; j < round; ++ j) {
if (i == j) {
Dist[i][j] = 0;
} else {
Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; ++ i) {
fread(pair, sizeof(int), 3, file);
Dist[pair[0]][pair[1]] = pair[2];
}
fclose(file);
}
void output(char *outFileName) {
FILE *outfile = fopen(outFileName, "wb"); // binary mode, matching the "rb" used for input and the fwrite below
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF)
Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
}
int ceil(int a, int b) {
return (a + b - 1) / b;
}
void block_FW(int B) {
int round = ceil(n, B);
int split = (int)(round / 2) + 1;
cudaSetDevice(0);
cudaMalloc((void **)&device_Dist[0], (size_t)(round * B * round * B * sizeof(int)));
for(int i = 0;i < split * B;i++){
cudaMemcpy(device_Dist[0] + (i * round * B), Dist[i], (size_t)(round * B * sizeof(int)), cudaMemcpyHostToDevice);
}
cudaSetDevice(1);
cudaMalloc((void **)&device_Dist[1], (size_t)(round * B * round * B * sizeof(int)));
for(int i = split * B;i < round * B;i++){
cudaMemcpy(device_Dist[1] + (i * round * B), Dist[i], (size_t)(round * B * sizeof(int)), cudaMemcpyHostToDevice);
}
dim3 grid1(1, 1); dim3 grid20(1, round);
dim3 grid21_0(split, 1); dim3 grid3_0(split, round);
dim3 grid21_1(round - split, 1); dim3 grid3_1(round - split, round);
dim3 block(Block_size , Block_size);
for (int r = 0; r < round; ++r) {
int x = (r < split)? 0 : 1;
cudaSetDevice(x);
/* Phase 1*/
cal<<<grid1 , block>>>(device_Dist[x], B, round * B, r, r, r, 1);
/* Phase 2 row*/
cal<<<grid20 , block>>>(device_Dist[x], B, round * B, r, r, 0, 20);
cudaMemcpyPeer((void*)(device_Dist[1 - x] + (r * B * round * B)), 1 - x, (void*)(device_Dist[x] + (r * B * round * B)), x, (size_t)(B * round * B * sizeof(int)));
cudaSetDevice(0);
/* Phase 2 col*/
cal<<<grid21_0 , block>>>(device_Dist[0], B, round * B, r, 0, r, 21);
/* Phase 3*/
cal<<<grid3_0 , block>>>(device_Dist[0], B, round * B, r, 0, 0, 3);
cudaSetDevice(1);
/* Phase 2 col*/
cal<<<grid21_1 , block>>>(device_Dist[1], B, round * B, r, split, r, 21);
/* Phase 3*/
cal<<<grid3_1 , block>>>(device_Dist[1], B, round * B, r, split, 0, 3);
}
cudaSetDevice(0);
for(int i = 0;i < split * B;i++){
cudaMemcpy(Dist[i], device_Dist[0] + (i * round * B), (size_t)(n * sizeof(int)), cudaMemcpyDeviceToHost);
}
cudaSetDevice(1);
for(int i = split * B;i < n;i++){
cudaMemcpy(Dist[i], device_Dist[1] + (i * round * B), (size_t)(n * sizeof(int)), cudaMemcpyDeviceToHost);
}
}
__global__ void cal(int* Dist, int B, int Dist_width, int Round, int par_x, int par_y, int phase) {
__shared__ int i_k[Block_size][Block_size];
__shared__ int k_j[Block_size][Block_size];
__shared__ int i_j[Block_size][Block_size];
int real_i = par_x * B + blockIdx.x * B;
int real_j = par_y * B + blockIdx.y * B;
if(phase == 20 && real_j == Round * B){
return;
}else if(phase == 21 && real_i == Round * B){
return;
}else if(phase == 3 && (real_i == Round * B || real_j == Round * B)){
return;
}
real_i += threadIdx.y;
real_j += threadIdx.x;
int i = threadIdx.y , j = threadIdx.x , k;
i_k[i][j] = Dist[real_i * Dist_width + Round * B + j];
k_j[i][j] = Dist[(Round * B + i) * Dist_width + real_j];
i_j[i][j] = Dist[real_i * Dist_width + real_j];
__syncthreads();
if(phase == 1){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
i_k[i][j] = i_j[i][j];
k_j[i][j] = i_j[i][j];
}
__syncthreads();
}
}else if(phase == 20){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
k_j[i][j] = i_j[i][j];
}
__syncthreads();
}
}else if(phase == 21){
for(k = 0;k < B;k++){
if (i_k[i][k] + k_j[k][j] < i_j[i][j]) {
i_j[i][j] = i_k[i][k] + k_j[k][j];
i_k[i][j] = i_j[i][j];
}
__syncthreads();
}
}else{
#pragma unroll
for(k = 0; k < Block_size; k++)
i_j[i][j] = min(i_j[i][j], i_k[i][k] + k_j[k][j]);
}
Dist[real_i * Dist_width + real_j] = i_j[i][j];
}
|
1877a509b821fe015f73dedc8ffb6666816ec168.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL2_H_
#define _KERNEL2_H_
__global__ void
Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// printf("Kernel#2 - Thread Id = %d - Before Check\n", tid);
// printf("Kernel#2 - Thread Id = %d - Number of Nodes (no_of_nodes) = %d\n", tid, no_of_nodes);
// printf("Kernel#2 - Thread Id = %d - Update Graph Mask (g_updating_graph_mask[tid]) = %d\n", tid, g_updating_graph_mask[tid]);
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
*g_over=true;
g_updating_graph_mask[tid]=false;
// printf("Kernel#2 - Thread Id = %d - After Check\n", tid);
// printf("Kernel#2 - Thread Id = %d - Update Graph Mask (g_updating_graph_mask[tid]) = %d\n", tid, g_updating_graph_mask[tid]);
// printf("Kernel#2 - Thread Id = %d - Visited Flag (g_graph_visited[tid]) = %d\n", tid, g_graph_visited[tid]);
// printf("Kernel#2 - Thread Id = %d - Over Flag (g_over) = %d\n", tid, g_over);
// printf("Kernel#2 - Thread Id = %d - Graph Mask (g_graph_mask[tid]) = %d\n", tid, g_graph_mask[tid]);
}
}
#endif
| 1877a509b821fe015f73dedc8ffb6666816ec168.cu | /*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL2_H_
#define _KERNEL2_H_
__global__ void
Kernel2( bool* g_graph_mask, bool *g_updating_graph_mask, bool* g_graph_visited, bool *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// printf("Kernel#2 - Thread Id = %d - Before Check\n", tid);
// printf("Kernel#2 - Thread Id = %d - Number of Nodes (no_of_nodes) = %d\n", tid, no_of_nodes);
// printf("Kernel#2 - Thread Id = %d - Update Graph Mask (g_updating_graph_mask[tid]) = %d\n", tid, g_updating_graph_mask[tid]);
if( tid<no_of_nodes && g_updating_graph_mask[tid])
{
g_graph_mask[tid]=true;
g_graph_visited[tid]=true;
*g_over=true;
g_updating_graph_mask[tid]=false;
// printf("Kernel#2 - Thread Id = %d - After Check\n", tid);
// printf("Kernel#2 - Thread Id = %d - Update Graph Mask (g_updating_graph_mask[tid]) = %d\n", tid, g_updating_graph_mask[tid]);
// printf("Kernel#2 - Thread Id = %d - Visited Flag (g_graph_visited[tid]) = %d\n", tid, g_graph_visited[tid]);
// printf("Kernel#2 - Thread Id = %d - Over Flag (g_over) = %d\n", tid, g_over);
// printf("Kernel#2 - Thread Id = %d - Graph Mask (g_graph_mask[tid]) = %d\n", tid, g_graph_mask[tid]);
}
}
#endif
|
3a9f88e2ddef0a9e57c064dfd2a5f76d7541a1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/transform.h>
#include <typedef.hpp>
#if defined(WITH_GPU)
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef unsigned short uint16_t;
typedef signed short int16_t;
typedef unsigned int uint32_t;
//typedef signed long int32_t;
//typedef unsigned long int uint64_t;
//typedef signed long long int64_t;
#include <Core/UVec.hpp>
#include <Core/CudaStream.hpp>
#include "UVec_impl_cuda.hpp"
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
namespace beacls
{
void* allocateCudaMem(const size_t s)
{
void* ptr = NULL;
hipMalloc((void**)&ptr,s);
return ptr;
}
void freeCudaMem(void* ptr)
{
if(ptr) hipFree(ptr);
}
void copyCudaDeviceToHost(void* dst, const void* src, size_t s)
{
hipMemcpy(dst,src,s,hipMemcpyDeviceToHost);
}
void copyCudaDeviceToDevice(void* dst, const void* src, size_t s)
{
hipMemcpy(dst,src,s,hipMemcpyDeviceToDevice);
}
void copyCudaHostToDevice(void* dst, const void* src, size_t s)
{
hipMemcpy(dst,src,s,hipMemcpyHostToDevice);
}
beacls::CudaStream_impl::CudaStream_impl() {
hipStreamCreate(&stream);
}
beacls::CudaStream_impl::~CudaStream_impl() {
if (stream) {
hipStreamDestroy(stream);
}
}
hipStream_t beacls::CudaStream_impl::get_stream() {
return stream;
}
beacls::CudaStream::CudaStream() {
pimpl = new CudaStream_impl();
}
beacls::CudaStream::~CudaStream() {
delete pimpl;
}
hipStream_t beacls::CudaStream::get_stream() {
if (pimpl) return pimpl->get_stream();
else return NULL;
}
hipStream_t get_stream(const beacls::UVec& src) {
if (beacls::is_cuda(src)) {
beacls::CudaStream* cudaStream = src.get_cudaStream();
if (cudaStream) return cudaStream->get_stream();
else return NULL;
}
else return NULL;
}
void copyCudaDeviceToHostAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
hipStream_t stream = cudaStream->get_stream();
hipMemcpyAsync(dst, src, s, hipMemcpyDeviceToHost, stream);
}
}
void copyCudaDeviceToDeviceAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
hipStream_t stream = cudaStream->get_stream();
hipMemcpyAsync(dst, src, s, hipMemcpyDeviceToDevice, stream);
}
}
void copyCudaHostToDeviceAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
hipStream_t stream = cudaStream->get_stream();
hipMemcpyAsync(dst, src, s, hipMemcpyHostToDevice, stream);
}
}
void synchronizeCuda(beacls::CudaStream* cudaStream) {
if (cudaStream) {
hipStream_t stream = cudaStream->get_stream();
hipStreamSynchronize(stream);
}
}
template <typename T>
void fillCudaMemory_template(T* dst_raw_ptr, const T val, size_t length) {
thrust::device_ptr<T> dst_dev_ptr = thrust::device_pointer_cast((T*)dst_raw_ptr);
thrust::fill(dst_dev_ptr, dst_dev_ptr + length, val);
}
void fillCudaMemory(uint8_t* dst, const uint8_t val, size_t s)
{
fillCudaMemory_template<uint8_t>(dst,val,s);
}
void fillCudaMemory(int8_t* dst, const int8_t val, size_t s)
{
fillCudaMemory_template<int8_t>(dst,val,s);
}
void fillCudaMemory(uint16_t* dst, const uint16_t val, size_t s)
{
fillCudaMemory_template<uint16_t>(dst,val,s);
}
void fillCudaMemory(int16_t* dst, const int16_t val, size_t s)
{
fillCudaMemory_template<int16_t>(dst,val,s);
}
void fillCudaMemory(uint32_t* dst, const uint32_t val, size_t s)
{
fillCudaMemory_template<uint32_t>(dst,val,s);
}
void fillCudaMemory(int32_t* dst, const int32_t val, size_t s)
{
fillCudaMemory_template<int32_t>(dst,val,s);
}
void fillCudaMemory(uint64_t* dst, const uint64_t val, size_t s)
{
fillCudaMemory_template<uint64_t>(dst,val,s);
}
void fillCudaMemory(int64_t* dst, const int64_t val, size_t s)
{
fillCudaMemory_template<int64_t>(dst,val,s);
}
void fillCudaMemory(double* dst, const double val, size_t s)
{
fillCudaMemory_template<double>(dst,val,s);
}
void fillCudaMemory(float* dst, const float val, size_t s)
{
fillCudaMemory_template<float>(dst,val,s);
}
template<typename T>
struct AverageFunctor : public thrust::binary_function<const T, const T, T> {
__host__ __device__
T operator()(const T& rhs, const T& lhs) const
{
return (rhs + lhs) / 2;
}
};
template <typename T>
void average_template(void* dst_raw_ptr, const void* src1_raw_ptr, const void* src2_raw_ptr, const size_t length, hipStream_t stream) {
thrust::device_ptr<T> dst_dev_ptr = thrust::device_pointer_cast((T*)dst_raw_ptr);
thrust::device_ptr<const T> src1_dev_ptr = thrust::device_pointer_cast((const T*)src1_raw_ptr);
thrust::device_ptr<const T> src2_dev_ptr = thrust::device_pointer_cast((const T*)src2_raw_ptr);
thrust::transform(thrust::hip::par.on(stream),
src1_dev_ptr, src1_dev_ptr + length, src2_dev_ptr, dst_dev_ptr, AverageFunctor<T>());
}
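// Element-wise average of two UVecs: dispatches on the UVec depth and runs the
// thrust::transform on the stream attached to the first source vector.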
void cudaAverage(beacls::UVec& dst_uvec, const beacls::UVec& src1, const beacls::UVec& src2) {
const size_t length = src1.size();
const UVecDepth d = src1.depth();
beacls::reallocateAsSrc(dst_uvec, src1);
dst_uvec.set_cudaStream(src1.get_cudaStream());
hipStream_t stream = beacls::get_stream(dst_uvec);
FLOAT_TYPE* dst_ptr = beacls::UVec_<FLOAT_TYPE>(dst_uvec).ptr();
const FLOAT_TYPE* src1_ptr = beacls::UVec_<FLOAT_TYPE>(src1).ptr();
const FLOAT_TYPE* src2_ptr = beacls::UVec_<FLOAT_TYPE>(src2).ptr();
switch (d) {
case UVecDepth_Invalid:
case UVecDepth_User:
default:
break;
case UVecDepth_8U:
average_template<uint8_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_8S:
average_template<int8_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_16U:
average_template<uint16_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_16S:
average_template<int16_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32S:
average_template<int32_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32F:
average_template<float>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64F:
average_template<double>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32U:
average_template<uint32_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64U:
average_template<uint64_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64S:
average_template<int64_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
}
}
int get_num_of_gpus_impl() {
int device_count=0;
hipError_t err;
err = hipGetDeviceCount(&device_count);
if(err) {
return 0;
}
return device_count;
}
void set_gpu_id_impl(const int id) {
hipSetDevice(id);
}
} // beacls
#endif /* defined(WITH_GPU) */
| 3a9f88e2ddef0a9e57c064dfd2a5f76d7541a1d2.cu | #include <cuda_runtime.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/transform.h>
#include <typedef.hpp>
#if defined(WITH_GPU)
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef unsigned short uint16_t;
typedef signed short int16_t;
typedef unsigned int uint32_t;
//typedef signed long int32_t;
//typedef unsigned long int uint64_t;
//typedef signed long long int64_t;
#include <Core/UVec.hpp>
#include <Core/CudaStream.hpp>
#include "UVec_impl_cuda.hpp"
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
namespace beacls
{
void* allocateCudaMem(const size_t s)
{
void* ptr = NULL;
cudaMalloc((void**)&ptr,s);
return ptr;
}
void freeCudaMem(void* ptr)
{
if(ptr) cudaFree(ptr);
}
void copyCudaDeviceToHost(void* dst, const void* src, size_t s)
{
cudaMemcpy(dst,src,s,cudaMemcpyDeviceToHost);
}
void copyCudaDeviceToDevice(void* dst, const void* src, size_t s)
{
cudaMemcpy(dst,src,s,cudaMemcpyDeviceToDevice);
}
void copyCudaHostToDevice(void* dst, const void* src, size_t s)
{
cudaMemcpy(dst,src,s,cudaMemcpyHostToDevice);
}
beacls::CudaStream_impl::CudaStream_impl() {
cudaStreamCreate(&stream);
}
beacls::CudaStream_impl::~CudaStream_impl() {
if (stream) {
cudaStreamDestroy(stream);
}
}
cudaStream_t beacls::CudaStream_impl::get_stream() {
return stream;
}
beacls::CudaStream::CudaStream() {
pimpl = new CudaStream_impl();
}
beacls::CudaStream::~CudaStream() {
delete pimpl;
}
cudaStream_t beacls::CudaStream::get_stream() {
if (pimpl) return pimpl->get_stream();
else return NULL;
}
cudaStream_t get_stream(const beacls::UVec& src) {
if (beacls::is_cuda(src)) {
beacls::CudaStream* cudaStream = src.get_cudaStream();
if (cudaStream) return cudaStream->get_stream();
else return NULL;
}
else return NULL;
}
void copyCudaDeviceToHostAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
cudaStream_t stream = cudaStream->get_stream();
cudaMemcpyAsync(dst, src, s, cudaMemcpyDeviceToHost, stream);
}
}
void copyCudaDeviceToDeviceAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
cudaStream_t stream = cudaStream->get_stream();
cudaMemcpyAsync(dst, src, s, cudaMemcpyDeviceToDevice, stream);
}
}
void copyCudaHostToDeviceAsync(void* dst, const void* src, const size_t s, beacls::CudaStream* cudaStream) {
if (cudaStream) {
cudaStream_t stream = cudaStream->get_stream();
cudaMemcpyAsync(dst, src, s, cudaMemcpyHostToDevice, stream);
}
}
void synchronizeCuda(beacls::CudaStream* cudaStream) {
if (cudaStream) {
cudaStream_t stream = cudaStream->get_stream();
cudaStreamSynchronize(stream);
}
}
template <typename T>
void fillCudaMemory_template(T* dst_raw_ptr, const T val, size_t length) {
thrust::device_ptr<T> dst_dev_ptr = thrust::device_pointer_cast((T*)dst_raw_ptr);
thrust::fill(dst_dev_ptr, dst_dev_ptr + length, val);
}
void fillCudaMemory(uint8_t* dst, const uint8_t val, size_t s)
{
fillCudaMemory_template<uint8_t>(dst,val,s);
}
void fillCudaMemory(int8_t* dst, const int8_t val, size_t s)
{
fillCudaMemory_template<int8_t>(dst,val,s);
}
void fillCudaMemory(uint16_t* dst, const uint16_t val, size_t s)
{
fillCudaMemory_template<uint16_t>(dst,val,s);
}
void fillCudaMemory(int16_t* dst, const int16_t val, size_t s)
{
fillCudaMemory_template<int16_t>(dst,val,s);
}
void fillCudaMemory(uint32_t* dst, const uint32_t val, size_t s)
{
fillCudaMemory_template<uint32_t>(dst,val,s);
}
void fillCudaMemory(int32_t* dst, const int32_t val, size_t s)
{
fillCudaMemory_template<int32_t>(dst,val,s);
}
void fillCudaMemory(uint64_t* dst, const uint64_t val, size_t s)
{
fillCudaMemory_template<uint64_t>(dst,val,s);
}
void fillCudaMemory(int64_t* dst, const int64_t val, size_t s)
{
fillCudaMemory_template<int64_t>(dst,val,s);
}
void fillCudaMemory(double* dst, const double val, size_t s)
{
fillCudaMemory_template<double>(dst,val,s);
}
void fillCudaMemory(float* dst, const float val, size_t s)
{
fillCudaMemory_template<float>(dst,val,s);
}
template<typename T>
struct AverageFunctor : public thrust::binary_function<const T, const T, T> {
__host__ __device__
T operator()(const T& rhs, const T& lhs) const
{
return (rhs + lhs) / 2;
}
};
template <typename T>
void average_template(void* dst_raw_ptr, const void* src1_raw_ptr, const void* src2_raw_ptr, const size_t length, cudaStream_t stream) {
thrust::device_ptr<T> dst_dev_ptr = thrust::device_pointer_cast((T*)dst_raw_ptr);
thrust::device_ptr<const T> src1_dev_ptr = thrust::device_pointer_cast((const T*)src1_raw_ptr);
thrust::device_ptr<const T> src2_dev_ptr = thrust::device_pointer_cast((const T*)src2_raw_ptr);
thrust::transform(thrust::cuda::par.on(stream),
src1_dev_ptr, src1_dev_ptr + length, src2_dev_ptr, dst_dev_ptr, AverageFunctor<T>());
}
void cudaAverage(beacls::UVec& dst_uvec, const beacls::UVec& src1, const beacls::UVec& src2) {
const size_t length = src1.size();
const UVecDepth d = src1.depth();
beacls::reallocateAsSrc(dst_uvec, src1);
dst_uvec.set_cudaStream(src1.get_cudaStream());
cudaStream_t stream = beacls::get_stream(dst_uvec);
FLOAT_TYPE* dst_ptr = beacls::UVec_<FLOAT_TYPE>(dst_uvec).ptr();
const FLOAT_TYPE* src1_ptr = beacls::UVec_<FLOAT_TYPE>(src1).ptr();
const FLOAT_TYPE* src2_ptr = beacls::UVec_<FLOAT_TYPE>(src2).ptr();
switch (d) {
case UVecDepth_Invalid:
case UVecDepth_User:
default:
break;
case UVecDepth_8U:
average_template<uint8_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_8S:
average_template<int8_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_16U:
average_template<uint16_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_16S:
average_template<int16_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32S:
average_template<int32_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32F:
average_template<float>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64F:
average_template<double>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_32U:
average_template<uint32_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64U:
average_template<uint64_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
case UVecDepth_64S:
average_template<int64_t>(dst_ptr, src1_ptr, src2_ptr, length, stream);
break;
}
}
int get_num_of_gpus_impl() {
int device_count=0;
cudaError_t err;
err = cudaGetDeviceCount(&device_count);
if(err) {
return 0;
}
return device_count;
}
void set_gpu_id_impl(const int id) {
cudaSetDevice(id);
}
} // beacls
#endif /* defined(WITH_GPU) */
|
59c643531a8e8ff2148403d37d984a98a976b28c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _GRAVLIST_KERNEL_H_
#define _GRAVLIST_KERNEL_H_
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define FETCH(t, i) tex1Dfetch(t##Tex, i)
texture<float4, 1, hipReadModeElementType> oldPosTex;
texture<uint, 1, hipReadModeElementType> gridParticleHashTex;
texture<uint, 1, hipReadModeElementType> cellStartTex;
texture<uint, 1, hipReadModeElementType> cellEndTex;
__constant__ SIMPARAM dSimParam;
__constant__ SOFTPARAM dSoftParam;
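// Pairwise short-range gravitational acceleration: the separation is wrapped with the
// minimum-image convention, the 1/r^2 force is softened with a cubic spline for r < h,
// and the result is scaled by the tabulated shortrange_table factor (indexed by
// asmthfac * r) and cut off beyond rcut, apparently the short-range half of a
// TreePM-style force split.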
__device__ float3 calcForce(float3 pos, float3 pos2){
float3 acc = make_float3(0.0f);
float dx = pos2.x - pos.x;
float dy = pos2.y - pos.y;
float dz = pos2.z - pos.z;
float boxsize = dSimParam.boxsize;
float boxhalf = dSimParam.boxhalf;
if(dx > boxhalf)
dx -= boxsize;
if(dx < -boxhalf)
dx += boxsize;
if(dy > boxhalf)
dy -= boxsize;
if(dy < -boxhalf)
dy += boxsize;
if(dz > boxhalf)
dz -= boxsize;
if(dz < -boxhalf)
dz += boxsize;
float r2 = dx * dx + dy * dy + dz * dz;
float rcut = dSimParam.rcut;
float rcut2 = dSimParam.rcut2;
float asmth = dSimParam.asmth;
float asmthfac = dSimParam.asmthfac;
float mass = dSimParam.mass;
if(r2 > rcut2)
return acc;
float h = dSoftParam.h;
float h_inv = dSoftParam.h_inv;
float h3_inv = dSoftParam.h3_inv;
float r = sqrtf(r2);
float fac, u;
if(r >= h){
fac = mass / (r2 * r);
}else{
u = r * h_inv;
if(u < 0.5)
fac = mass * h3_inv * (10.66667 + u * u * (32.0 * u - 38.4));
else
fac = mass * h3_inv * (21.33333 - 48.0 * u
+ 38.4 * u * u - 10.66667 * u * u * u - 0.06667 / (u * u * u));
}
int tabindex = (int) (asmthfac * r);
if(tabindex < NTAB){
fac *= dSimParam.shortrange_table[tabindex];
acc.x += dx * fac;
acc.y += dy * fac;
acc.z += dz * fac;
}
acc *= dSimParam.G;
return acc;
}
__global__
void calcHashD(uint *gridParticleHash,
uint *gridParticleIndex,
float4 *pos,
uint numParticles){
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
float gridFac = dSimParam.to_grid_fac;
float4 p = pos[index];
int3 gridPos;
gridPos.x = gridFac * p.x;
gridPos.y = gridFac * p.y;
gridPos.z = gridFac * p.z;
uint hash;
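// flatten the 3D cell coordinate into a linear grid hash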
hash = (gridPos.x * PMGRID + gridPos.y) * PMGRID + gridPos.z;
gridParticleHash[index] = hash;
gridParticleIndex[index] = index;
}
__global__
void reorderDataAndFindCellStartD(uint *cellStart,
uint *cellEnd,
float4 *sortedPos,
uint *gridParticleHash,
uint *gridParticleIndex,
float4 *oldPos,
uint numParticles){
extern __shared__ uint sharedHash[];
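// each thread stores its cell hash at tid+1; slot 0 holds the last hash of the previous block so cell boundaries can be detected locally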
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
uint hash;
hash = gridParticleHash[index];
sharedHash[threadIdx.x + 1] = hash;
if(index > 0 && threadIdx.x == 0){
sharedHash[0] = gridParticleHash[index - 1];
}
__syncthreads();
if(index == 0 || hash != sharedHash[threadIdx.x]){
cellStart[hash] = index;
if(index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if(index == numParticles - 1){
cellEnd[hash] = index + 1;
}
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex);
sortedPos[index] = pos;
}
__global__
void cudaForceEvaluateShortrangeD(float4 *gravAccel,
float4 *oldPos,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles){
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
float3 pos = make_float3(FETCH(oldPos, index));
float3 acc = make_float3(0.0f);
float gridFac = dSimParam.to_grid_fac;
float rcut = dSimParam.rcut;
int xl, xr, yl, yr, zl, zr;
xl = gridFac * (pos.x - rcut) + PMGRID;
xl -= PMGRID;
xr = gridFac * (pos.x + rcut);
yl = gridFac * (pos.y - rcut) + PMGRID;
yl -= PMGRID;
yr = gridFac * (pos.y + rcut);
zl = gridFac * (pos.z - rcut) + PMGRID;
zl -= PMGRID;
zr = gridFac * (pos.z + rcut);
int ix, iy, iz, iix, iiy, iiz;
for(ix = xl; ix <= xr; ++ix){
for(iy = yl; iy <= yr; ++iy){
for(iz = zl; iz <= zr; ++iz){
iix = ix;
iiy = iy;
iiz = iz;
if(iix < 0)
iix += PMGRID;
if(iiy < 0)
iiy += PMGRID;
if(iiz < 0)
iiz += PMGRID;
if(iix >= PMGRID)
iix -= PMGRID;
if(iiy >= PMGRID)
iiy -= PMGRID;
if(iiz >= PMGRID)
iiz -= PMGRID;
uint gridHash = (iix * PMGRID + iiy) * PMGRID + iiz;
uint startIndex = FETCH(cellStart, gridHash);
if(startIndex != 0xffffffff){
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j = startIndex; j < endIndex; ++j){
float3 pos2 = make_float3(FETCH(oldPos, j));
acc += calcForce(pos, pos2);
}
}
}
}
}
uint originalIndex = gridParticleIndex[index];
gravAccel[originalIndex] = make_float4(acc, 0.0f);
}
#endif
| 59c643531a8e8ff2148403d37d984a98a976b28c.cu | #ifndef _GRAVLIST_KERNEL_H_
#define _GRAVLIST_KERNEL_H_
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define FETCH(t, i) tex1Dfetch(t##Tex, i)
texture<float4, 1, cudaReadModeElementType> oldPosTex;
texture<uint, 1, cudaReadModeElementType> gridParticleHashTex;
texture<uint, 1, cudaReadModeElementType> cellStartTex;
texture<uint, 1, cudaReadModeElementType> cellEndTex;
__constant__ SIMPARAM dSimParam;
__constant__ SOFTPARAM dSoftParam;
__device__ float3 calcForce(float3 pos, float3 pos2){
float3 acc = make_float3(0.0f);
float dx = pos2.x - pos.x;
float dy = pos2.y - pos.y;
float dz = pos2.z - pos.z;
float boxsize = dSimParam.boxsize;
float boxhalf = dSimParam.boxhalf;
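// minimum-image convention: wrap each separation component back into the periodic box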
if(dx > boxhalf)
dx -= boxsize;
if(dx < -boxhalf)
dx += boxsize;
if(dy > boxhalf)
dy -= boxsize;
if(dy < -boxhalf)
dy += boxsize;
if(dz > boxhalf)
dz -= boxsize;
if(dz < -boxhalf)
dz += boxsize;
float r2 = dx * dx + dy * dy + dz * dz;
float rcut = dSimParam.rcut;
float rcut2 = dSimParam.rcut2;
float asmth = dSimParam.asmth;
float asmthfac = dSimParam.asmthfac;
float mass = dSimParam.mass;
if(r2 > rcut2)
return acc;
float h = dSoftParam.h;
float h_inv = dSoftParam.h_inv;
float h3_inv = dSoftParam.h3_inv;
float r = sqrtf(r2);
float fac, u;
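// plain Newtonian 1/r^2 force outside the softening length h, spline-softened kernel inside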
if(r >= h){
fac = mass / (r2 * r);
}else{
u = r * h_inv;
if(u < 0.5)
fac = mass * h3_inv * (10.66667 + u * u * (32.0 * u - 38.4));
else
fac = mass * h3_inv * (21.33333 - 48.0 * u
+ 38.4 * u * u - 10.66667 * u * u * u - 0.06667 / (u * u * u));
}
int tabindex = (int) (asmthfac * r);
if(tabindex < NTAB){
fac *= dSimParam.shortrange_table[tabindex];
acc.x += dx * fac;
acc.y += dy * fac;
acc.z += dz * fac;
}
acc *= dSimParam.G;
return acc;
}
__global__
void calcHashD(uint *gridParticleHash,
uint *gridParticleIndex,
float4 *pos,
uint numParticles){
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
float gridFac = dSimParam.to_grid_fac;
float4 p = pos[index];
int3 gridPos;
gridPos.x = gridFac * p.x;
gridPos.y = gridFac * p.y;
gridPos.z = gridFac * p.z;
uint hash;
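// flatten the 3D cell coordinate into a linear grid hash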
hash = (gridPos.x * PMGRID + gridPos.y) * PMGRID + gridPos.z;
gridParticleHash[index] = hash;
gridParticleIndex[index] = index;
}
__global__
void reorderDataAndFindCellStartD(uint *cellStart,
uint *cellEnd,
float4 *sortedPos,
uint *gridParticleHash,
uint *gridParticleIndex,
float4 *oldPos,
uint numParticles){
extern __shared__ uint sharedHash[];
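// each thread stores its cell hash at tid+1; slot 0 holds the last hash of the previous block so cell boundaries can be detected locally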
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
uint hash;
hash = gridParticleHash[index];
sharedHash[threadIdx.x + 1] = hash;
if(index > 0 && threadIdx.x == 0){
sharedHash[0] = gridParticleHash[index - 1];
}
__syncthreads();
if(index == 0 || hash != sharedHash[threadIdx.x]){
cellStart[hash] = index;
if(index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if(index == numParticles - 1){
cellEnd[hash] = index + 1;
}
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex);
sortedPos[index] = pos;
}
__global__
void cudaForceEvaluateShortrangeD(float4 *gravAccel,
float4 *oldPos,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles){
uint index = blockIdx.y * gridDim.x * blockDim.x
+ blockIdx.x * blockDim.x + threadIdx.x;
if(index >= numParticles)
return;
float3 pos = make_float3(FETCH(oldPos, index));
float3 acc = make_float3(0.0f);
float gridFac = dSimParam.to_grid_fac;
float rcut = dSimParam.rcut;
int xl, xr, yl, yr, zl, zr;
xl = gridFac * (pos.x - rcut) + PMGRID;
xl -= PMGRID;
xr = gridFac * (pos.x + rcut);
yl = gridFac * (pos.y - rcut) + PMGRID;
yl -= PMGRID;
yr = gridFac * (pos.y + rcut);
zl = gridFac * (pos.z - rcut) + PMGRID;
zl -= PMGRID;
zr = gridFac * (pos.z + rcut);
int ix, iy, iz, iix, iiy, iiz;
for(ix = xl; ix <= xr; ++ix){
for(iy = yl; iy <= yr; ++iy){
for(iz = zl; iz <= zr; ++iz){
iix = ix;
iiy = iy;
iiz = iz;
if(iix < 0)
iix += PMGRID;
if(iiy < 0)
iiy += PMGRID;
if(iiz < 0)
iiz += PMGRID;
if(iix >= PMGRID)
iix -= PMGRID;
if(iiy >= PMGRID)
iiy -= PMGRID;
if(iiz >= PMGRID)
iiz -= PMGRID;
uint gridHash = (iix * PMGRID + iiy) * PMGRID + iiz;
uint startIndex = FETCH(cellStart, gridHash);
if(startIndex != 0xffffffff){
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j = startIndex; j < endIndex; ++j){
float3 pos2 = make_float3(FETCH(oldPos, j));
acc += calcForce(pos, pos2);
}
}
}
}
}
uint originalIndex = gridParticleIndex[index];
gravAccel[originalIndex] = make_float4(acc, 0.0f);
}
#endif
|
97b2daac6287b78f89440f0d3213f81f59c9a94b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#include <vector>
#include <limits>
#include <iostream>
#include <unistd.h>
#define THREADS 512
#ifdef __cplusplus
extern "C"
{
#endif
using namespace std;
float *cu_grid;
// initialize grid with all 0 values
float* cu_init_grid(int h, int w) {
cu_grid = (float*) malloc(sizeof(float)*h*w);
for (int k = 0; k < h*w; k++) {
cu_grid[k] = 0;
}
return cu_grid;
}
void cu_display(float* grid, int h, int w) {
cout << "\033[2J\033[1;1H";
for (int i = 0; i < h; i++) {
printf("\n");
for (int j = 0; j < w; j++) {
if (grid[i*w+j] < 1) {
printf(" ");
} else if (grid[i*w+j] < 2) {
printf(". ");
} else if (grid[i*w+j] < 3) {
printf(".. ");
} else {
printf("... ");
}
}
}
}
// Grid evolve function: computes the next iteration of the grid from the previous state
__global__ void cu_next_iteration(float *grid1, float *grid2, float* row, int h, int w, int size)
{
int start = blockIdx.x*w;
if (start >= w*h) {
return;
}
// int drizzle;
if (start == 0) {
for (int i=0; i < w; i++) {
grid2[i] = row[i];
}
} else {
for (int i=start; i < start + w; i++) {
grid2[i] = 0;
}
for (int i=start; i < start + w; i++) {
if (grid1[i-w] >= size) {
// if (j-1 >= 0) {
grid2[i] += grid1[i-w]/2;
// }
if (i+1 < start + w) {
grid2[i+1] += grid1[i-w]/2;
}
// empty[j] += grid[i][j]/3;
} else {
grid2[i] += grid1[i-w];
}
}
}
}
void cuda_rainmaker(int show, int iter, int height, int width, int size_threshold, unsigned int microseconds)
{
int cu_height=height;
int cu_width=width;
int cu_size_threshold = size_threshold;
int grid_dim = cu_height*cu_width;
cu_grid = cu_init_grid(cu_height, cu_width);
float *row = (float *)malloc(sizeof(float)*cu_width);
float *device_row;
srand (time(NULL));
struct timeval time_start, time_end;
long int kernel_time=0;
// cuda
float *device_grid1, *device_grid2, *tmp;
// hipEvent_t event;
// hipEventCreate(&event);
hipMalloc((void **) &device_grid1, sizeof(float)*grid_dim);
hipMalloc((void **) &device_grid2, sizeof(float)*grid_dim);
hipMalloc((void **) &device_row, sizeof(float)*cu_width);
hipMemcpy(device_grid1, cu_grid, sizeof(float)*grid_dim, hipMemcpyHostToDevice);
// hipMemcpy(device_grid2, cu_grid, sizeof(float)*grid_dim, hipMemcpyHostToDevice);//
dim3 dimGrid(cu_height);
dim3 dimBlock(1);
for (int i = 0; i < iter; i++) {
// cu_display1(cu_grid, cu_height, cu_width);
if (show == 1) {
usleep(100000);
cu_display(cu_grid, cu_height, cu_width);
}
// printf("\n");
if (iter - i < cu_height) {
for (int j=0; j < cu_width; j++) {
row[j] = 0;
// printf("%.1f, ", row[j]);
}
} else {
for (int j=0; j < cu_width; j++) {
row[j] = rand() % 4;
// printf("%.1f, ", row[j]);
}
}
hipMemcpy(device_row, row, sizeof(float)*cu_width, hipMemcpyHostToDevice);
gettimeofday(&time_start, NULL);
hipLaunchKernelGGL(( cu_next_iteration), dim3(dimGrid), dim3(dimBlock), 0, 0, device_grid1, device_grid2, device_row, cu_height, cu_width, cu_size_threshold);
gettimeofday(&time_end, NULL);
kernel_time += (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
tmp = device_grid2;
device_grid2 = device_grid1;
device_grid1 = tmp;
// cu_split_and_merge<<<dimGrid, dimBlock>>>(device_grid1, device_grid2, cu_height, cu_width, cu_size_threshold);
// hipMemcpy(cu_grid, device_grid2, sizeof(float)*grid_dim, hipMemcpyDeviceToHost);
// hipMemcpy(device_grid1, cu_grid, sizeof(float)*grid_dim, hipMemcpyHostToDevice);
// cu_split_and_merge<<<dimGrid, dimBlock>>>(device_grid1, device_grid2, cu_height, cu_width, cu_size_threshold);
// hipMemcpy(device_grid2, cu_grid, sizeof(float)*grid_dim, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipMemcpy(cu_grid, device_grid1, sizeof(float)*grid_dim, hipMemcpyDeviceToHost);
}
// hipDeviceSynchronize();
// hipEventSynchronize(event);
if (show == 1)
cout << "\033[2J\033[1;1H";
printf("kernel time: %ld microseconds\n", kernel_time);
// free back to heap
hipFree(device_grid1);
hipFree(device_grid2);
free(cu_grid);
}
#ifdef __cplusplus
}
#endif
| 97b2daac6287b78f89440f0d3213f81f59c9a94b.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#include <vector>
#include <limits>
#include <iostream>
#include <unistd.h>
#define THREADS 512
#ifdef __cplusplus
extern "C"
{
#endif
using namespace std;
float *cu_grid;
// initialize grid with all 0 values
float* cu_init_grid(int h, int w) {
cu_grid = (float*) malloc(sizeof(float)*h*w);
for (int k = 0; k < h*w; k++) {
cu_grid[k] = 0;
}
return cu_grid;
}
void cu_display(float* grid, int h, int w) {
cout << "\033[2J\033[1;1H";
for (int i = 0; i < h; i++) {
printf("\n");
for (int j = 0; j < w; j++) {
if (grid[i*w+j] < 1) {
printf(" ");
} else if (grid[i*w+j] < 2) {
printf(". ");
} else if (grid[i*w+j] < 3) {
printf(".. ");
} else {
printf("... ");
}
}
}
}
// Grid evolve function: computes the next iteration of the grid from the previous state
__global__ void cu_next_iteration(float *grid1, float *grid2, float* row, int h, int w, int size)
{
int start = blockIdx.x*w;
if (start >= w*h) {
return;
}
// int drizzle;
if (start == 0) {
for (int i=0; i < w; i++) {
grid2[i] = row[i];
}
} else {
for (int i=start; i < start + w; i++) {
grid2[i] = 0;
}
for (int i=start; i < start + w; i++) {
if (grid1[i-w] >= size) {
// if (j-1 >= 0) {
grid2[i] += grid1[i-w]/2;
// }
if (i+1 < start + w) {
grid2[i+1] += grid1[i-w]/2;
}
// empty[j] += grid[i][j]/3;
} else {
grid2[i] += grid1[i-w];
}
}
}
}
void cuda_rainmaker(int show, int iter, int height, int width, int size_threshold, unsigned int microseconds)
{
int cu_height=height;
int cu_width=width;
int cu_size_threshold = size_threshold;
int grid_dim = cu_height*cu_width;
cu_grid = cu_init_grid(cu_height, cu_width);
float *row = (float *)malloc(sizeof(float)*cu_width);
float *device_row;
srand (time(NULL));
struct timeval time_start, time_end;
long int kernel_time=0;
// cuda
float *device_grid1, *device_grid2, *tmp;
// cudaEvent_t event;
// cudaEventCreate(&event);
cudaMalloc((void **) &device_grid1, sizeof(float)*grid_dim);
cudaMalloc((void **) &device_grid2, sizeof(float)*grid_dim);
cudaMalloc((void **) &device_row, sizeof(float)*cu_width);
cudaMemcpy(device_grid1, cu_grid, sizeof(float)*grid_dim, cudaMemcpyHostToDevice);
// cudaMemcpy(device_grid2, cu_grid, sizeof(float)*grid_dim, cudaMemcpyHostToDevice);//
dim3 dimGrid(cu_height);
dim3 dimBlock(1);
for (int i = 0; i < iter; i++) {
// cu_display1(cu_grid, cu_height, cu_width);
if (show == 1) {
usleep(100000);
cu_display(cu_grid, cu_height, cu_width);
}
// printf("\n");
if (iter - i < cu_height) {
for (int j=0; j < cu_width; j++) {
row[j] = 0;
// printf("%.1f, ", row[j]);
}
} else {
for (int j=0; j < cu_width; j++) {
row[j] = rand() % 4;
// printf("%.1f, ", row[j]);
}
}
cudaMemcpy(device_row, row, sizeof(float)*cu_width, cudaMemcpyHostToDevice);
gettimeofday(&time_start, NULL);
cu_next_iteration<<<dimGrid, dimBlock>>>(device_grid1, device_grid2, device_row, cu_height, cu_width, cu_size_threshold);
gettimeofday(&time_end, NULL);
kernel_time += (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
tmp = device_grid2;
device_grid2 = device_grid1;
device_grid1 = tmp;
// cu_split_and_merge<<<dimGrid, dimBlock>>>(device_grid1, device_grid2, cu_height, cu_width, cu_size_threshold);
// cudaMemcpy(cu_grid, device_grid2, sizeof(float)*grid_dim, cudaMemcpyDeviceToHost);
// cudaMemcpy(device_grid1, cu_grid, sizeof(float)*grid_dim, cudaMemcpyHostToDevice);
// cu_split_and_merge<<<dimGrid, dimBlock>>>(device_grid1, device_grid2, cu_height, cu_width, cu_size_threshold);
// cudaMemcpy(device_grid2, cu_grid, sizeof(float)*grid_dim, cudaMemcpyHostToDevice);
cudaThreadSynchronize();
cudaMemcpy(cu_grid, device_grid1, sizeof(float)*grid_dim, cudaMemcpyDeviceToHost);
}
// cudaThreadSynchronize();
// cudaEventSynchronize(event);
if (show == 1)
cout << "\033[2J\033[1;1H";
printf("kernel time: %ld microseconds\n", kernel_time);
// free back to heap
cudaFree(device_grid1);
cudaFree(device_grid2);
free(cu_grid);
}
#ifdef __cplusplus
}
#endif
|
6f09ecdb4be23750a0e4eb1e8e2743752d330173.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
#include <math.h>
#include <stdio.h>
#include <float.h>
#include "nms_kernel.h"
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
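// t is a 64-bit mask: bit i is set when box i of this column block overlaps the current box above the threshold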
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(int boxes_num, float * boxes_dev,
unsigned long long * mask_dev, float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
}
#ifdef __cplusplus
}
#endif
| 6f09ecdb4be23750a0e4eb1e8e2743752d330173.cu | // ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#ifdef __cplusplus
extern "C" {
#endif
#include <math.h>
#include <stdio.h>
#include <float.h>
#include "nms_kernel.h"
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left + 1, 0.f), height = fmaxf(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
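// t is a 64-bit mask: bit i is set when box i of this column block overlaps the current box above the threshold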
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(int boxes_num, float * boxes_dev,
unsigned long long * mask_dev, float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
}
#ifdef __cplusplus
}
#endif
|
1abc6d516469f0e7f7b798b983a4b0d38e27e85d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce_hip.cuh"
#include "fill.cuh"
#include "kernel_helpers_hip.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <contrib/libs/cub/cub/device/device_segmented_reduce.cuh>
namespace NKernel {
//current cub segmented reduce sucks on small-segment problems
//LINE_SIZE should be leq 32
//TODO(noxoomo): special version for by-thread reduction in case of 1-4 elements per segment
//TODO(noxoomo): Fallback to block-reduce if one of segments is too big (e.g. loopSize > 256)
template <typename T, int BLOCK_SIZE, int LINE_SIZE>
__launch_bounds__(BLOCK_SIZE, 2048 / BLOCK_SIZE)
__global__ void SegmentedReduceWarpPartPerSegmentImpl(const T* src,
const int* segmentStarts,
const int* segmentEnds,
ui32 segmentsCount,
T* reducedSegments,
int blockCount
) {
__shared__ T localBufferStorage[BLOCK_SIZE];
const int tid = threadIdx.x;
int blockId = blockIdx.x;
while (blockId < blockCount) {
__syncthreads();
localBufferStorage[tid] = 0;
const int mask = LINE_SIZE - 1;
const int segmentsPerBlock = BLOCK_SIZE / LINE_SIZE;
const int warpId = tid / LINE_SIZE;
const int segmentId = blockId * segmentsPerBlock + warpId;
T* localBuffer = &localBufferStorage[warpId * LINE_SIZE];
int segmentStart = segmentId < segmentsCount ? segmentStarts[segmentId] : 0;
int segmentEnd = segmentId < segmentsCount ? segmentEnds[segmentId] : 0;
int segmentSize = segmentEnd - segmentStart;
src += segmentStart;
const int localId = tid & mask;
const auto loopSize = LINE_SIZE * CeilDivide(segmentSize, LINE_SIZE);
{
float tmp = 0;
for (int i = localId; i < loopSize; i += LINE_SIZE) {
tmp += i < segmentSize ? StreamLoad(src + i) : 0;
}
localBuffer[localId] = tmp;
}
const T warpResult = WarpReduce(localId, localBuffer, LINE_SIZE);
__syncthreads();
if (localId == 0) {
localBufferStorage[warpId] = warpResult;
}
__syncthreads();
if (tid < segmentsPerBlock && (blockId * segmentsPerBlock + tid < segmentsCount)) {
reducedSegments[blockId * segmentsPerBlock + tid] = localBufferStorage[tid];
}
blockId += gridDim.x;
}
}
template <typename T, int BLOCK_SIZE>
__global__ void SegmentedReduceBlockPerSegmentImpl(const T* src,
const int* segmentStarts,
const int* segmentEnds,
ui32 segmentsCount,
T* reducedSegments,
int numBlocks
) {
__shared__ T localBuffer[BLOCK_SIZE];
int blockId = blockIdx.x;
while (blockId < numBlocks) {
__syncthreads();
const int tid = threadIdx.x;
localBuffer[tid] = 0;
const int segmentId = blockId;
int segmentStart = segmentStarts[segmentId];
int segmentEnd = segmentEnds[segmentId];
int segmentSize = segmentEnd - segmentStart;
src += segmentStart;
const auto loopSize = BLOCK_SIZE * CeilDivide(segmentSize, BLOCK_SIZE);
for (int i = tid; i < loopSize; i += BLOCK_SIZE) {
localBuffer[tid] += i < segmentSize ? StreamLoad(src + i) : 0;
}
__syncthreads();
T result = FastInBlockReduce(tid, localBuffer, BLOCK_SIZE);
if (tid == 0) {
reducedSegments[blockId] = result;
}
blockId += gridDim.x;
}
}
template <typename T>
hipError_t Reduce(const T* input, T* output, ui32 size,
EOperatorType type,
TCubKernelContext& context, TCudaStream stream) {
using TKernelContext = TCubKernelContext;
switch (type) {
case EOperatorType::Sum: {
return hipcub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
hipcub::Sum(),
T(),
stream);
}
case EOperatorType::Max: {
return hipcub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
hipcub::Max(),
T(),
stream);
}
case EOperatorType::Min: {
return hipcub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
hipcub::Min(),
T(),
stream);
}
default: {
return hipErrorNotYetImplemented;
}
}
}
template <typename T, typename K>
hipError_t ReduceByKey(const T* input, const K* keys, ui32 size,
T* output, K* outKeys, ui32* outputSize,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream) {
using TKernelContext = TCubKernelContext;
switch (type) {
case EOperatorType::Sum: {
return hipcub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
hipcub::Sum(),
size,
stream);
}
case EOperatorType::Max: {
return hipcub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
hipcub::Max(),
size,
stream);
}
case EOperatorType::Min: {
return hipcub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
hipcub::Min(),
size,
stream);
}
default: {
return hipErrorNotYetImplemented;
}
}
}
template <typename T>
hipError_t SegmentedReduce(const T* input, ui32 size, const ui32* offsets, ui32 numSegments, T* output,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream) {
using TKernelContext = TCubKernelContext;
//WTF: the offsets in the cub kernel interface aren't declared const, but tests show they are effectively treated as const
int* beginOffsets = const_cast<int*>((const int*) offsets);
int* endOffsets = const_cast<int*>((const int*) (offsets + 1));
if (size == 0) {
FillBuffer(output, (T)(0), numSegments, stream);
return hipSuccess;
}
const double meanSize = size * 1.0 / numSegments;
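// for small mean segment sizes the hand-written warp/block-per-segment kernels below are used instead of cub's segmented reduce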
if (meanSize < 600) {
if (!context.Initialized) {
return hipSuccess;
}
switch (type) {
case EOperatorType::Sum: {
if (meanSize <= 2) {
const ui32 lineSize = 2;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 4) {
const ui32 lineSize = 4;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 8) {
const ui32 lineSize = 8;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 16) {
const ui32 lineSize = 16;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 256) {
const ui32 lineSize = 32;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else {
const ui32 blockSize = 512;
const ui32 numBlocks = numSegments;
SegmentedReduceBlockPerSegmentImpl<T, blockSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
}
return hipSuccess;
}
default: {
return hipErrorNotYetImplemented;
}
}
} else {
switch (type) {
case EOperatorType::Sum: {
return hipcub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, numSegments,
beginOffsets, endOffsets,
hipcub::Sum(),
T(),
stream);
}
case EOperatorType::Max: {
return hipcub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output,
numSegments,
beginOffsets, endOffsets,
hipcub::Max(),
T(),
stream);
}
case EOperatorType::Min: {
return hipcub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output,
numSegments,
beginOffsets, endOffsets,
hipcub::Min(),
T(),
stream);
}
default: {
return hipErrorNotYetImplemented;
}
}
}
}
#define REDUCE(Type) \
template hipError_t Reduce<Type>(const Type* input, Type* output, ui32 size, EOperatorType type, TCubKernelContext& context, TCudaStream stream);
REDUCE(float)
REDUCE(ui32)
REDUCE(int)
template hipError_t SegmentedReduce<float>(const float* input, ui32 size, const ui32* offsets, ui32 numSegments, float* output,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream);
template hipError_t ReduceByKey<float, ui32>(const float* input, const ui32* keys, ui32 size,
float* output, ui32* outKeys, ui32* outputSize,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream);
}
| 1abc6d516469f0e7f7b798b983a4b0d38e27e85d.cu | #include "reduce.cuh"
#include "fill.cuh"
#include "kernel_helpers.cuh"
#include <contrib/libs/cub/cub/device/device_reduce.cuh>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <contrib/libs/cub/cub/device/device_segmented_reduce.cuh>
namespace NKernel {
//current cub segmented reduce sucks on small segments problems
//LINE_SIZE should be leq 32
//TODO(noxoomo): special version for by-thread reduction in case of 1-4 elements per segment
//TODO(noxoomo): Fallback to block-reduce if one of segments is too big (e.g. loopSize > 256)
template <typename T, int BLOCK_SIZE, int LINE_SIZE>
__launch_bounds__(BLOCK_SIZE, 2048 / BLOCK_SIZE)
__global__ void SegmentedReduceWarpPartPerSegmentImpl(const T* src,
const int* segmentStarts,
const int* segmentEnds,
ui32 segmentsCount,
T* reducedSegments,
int blockCount
) {
__shared__ T localBufferStorage[BLOCK_SIZE];
const int tid = threadIdx.x;
int blockId = blockIdx.x;
while (blockId < blockCount) {
__syncthreads();
localBufferStorage[tid] = 0;
const int mask = LINE_SIZE - 1;
const int segmentsPerBlock = BLOCK_SIZE / LINE_SIZE;
const int warpId = tid / LINE_SIZE;
const int segmentId = blockId * segmentsPerBlock + warpId;
T* localBuffer = &localBufferStorage[warpId * LINE_SIZE];
int segmentStart = segmentId < segmentsCount ? segmentStarts[segmentId] : 0;
int segmentEnd = segmentId < segmentsCount ? segmentEnds[segmentId] : 0;
int segmentSize = segmentEnd - segmentStart;
src += segmentStart;
const int localId = tid & mask;
const auto loopSize = LINE_SIZE * CeilDivide(segmentSize, LINE_SIZE);
{
float tmp = 0;
for (int i = localId; i < loopSize; i += LINE_SIZE) {
tmp += i < segmentSize ? StreamLoad(src + i) : 0;
}
localBuffer[localId] = tmp;
}
const T warpResult = WarpReduce(localId, localBuffer, LINE_SIZE);
__syncthreads();
if (localId == 0) {
localBufferStorage[warpId] = warpResult;
}
__syncthreads();
if (tid < segmentsPerBlock && (blockId * segmentsPerBlock + tid < segmentsCount)) {
reducedSegments[blockId * segmentsPerBlock + tid] = localBufferStorage[tid];
}
blockId += gridDim.x;
}
}
template <typename T, int BLOCK_SIZE>
__global__ void SegmentedReduceBlockPerSegmentImpl(const T* src,
const int* segmentStarts,
const int* segmentEnds,
ui32 segmentsCount,
T* reducedSegments,
int numBlocks
) {
__shared__ T localBuffer[BLOCK_SIZE];
int blockId = blockIdx.x;
while (blockId < numBlocks) {
__syncthreads();
const int tid = threadIdx.x;
localBuffer[tid] = 0;
const int segmentId = blockId;
int segmentStart = segmentStarts[segmentId];
int segmentEnd = segmentEnds[segmentId];
int segmentSize = segmentEnd - segmentStart;
src += segmentStart;
const auto loopSize = BLOCK_SIZE * CeilDivide(segmentSize, BLOCK_SIZE);
for (int i = tid; i < loopSize; i += BLOCK_SIZE) {
localBuffer[tid] += i < segmentSize ? StreamLoad(src + i) : 0;
}
__syncthreads();
T result = FastInBlockReduce(tid, localBuffer, BLOCK_SIZE);
if (tid == 0) {
reducedSegments[blockId] = result;
}
blockId += gridDim.x;
}
}
template <typename T>
cudaError_t Reduce(const T* input, T* output, ui32 size,
EOperatorType type,
TCubKernelContext& context, TCudaStream stream) {
using TKernelContext = TCubKernelContext;
switch (type) {
case EOperatorType::Sum: {
return cub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
cub::Sum(),
T(),
stream);
}
case EOperatorType::Max: {
return cub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
cub::Max(),
T(),
stream);
}
case EOperatorType::Min: {
return cub::DeviceReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, size,
cub::Min(),
T(),
stream);
}
default: {
return cudaErrorNotYetImplemented;
}
}
}
template <typename T, typename K>
cudaError_t ReduceByKey(const T* input, const K* keys, ui32 size,
T* output, K* outKeys, ui32* outputSize,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream) {
using TKernelContext = TCubKernelContext;
switch (type) {
case EOperatorType::Sum: {
return cub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
cub::Sum(),
size,
stream);
}
case EOperatorType::Max: {
return cub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
cub::Max(),
size,
stream);
}
case EOperatorType::Min: {
return cub::DeviceReduce::ReduceByKey(context.TempStorage, context.TempStorageSize,
keys, outKeys,
input, output,
outputSize,
cub::Min(),
size,
stream);
}
default: {
return cudaErrorNotYetImplemented;
}
}
}
template <typename T>
cudaError_t SegmentedReduce(const T* input, ui32 size, const ui32* offsets, ui32 numSegments, T* output,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream) {
using TKernelContext = TCubKernelContext;
//WTF: the offsets in the cub kernel interface aren't declared const, but tests show they are effectively treated as const
int* beginOffsets = const_cast<int*>((const int*) offsets);
int* endOffsets = const_cast<int*>((const int*) (offsets + 1));
if (size == 0) {
FillBuffer(output, (T)(0), numSegments, stream);
return cudaSuccess;
}
const double meanSize = size * 1.0 / numSegments;
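// for small mean segment sizes the hand-written warp/block-per-segment kernels below are used instead of cub's segmented reduce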
if (meanSize < 600) {
if (!context.Initialized) {
return cudaSuccess;
}
switch (type) {
case EOperatorType::Sum: {
if (meanSize <= 2) {
const ui32 lineSize = 2;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 4) {
const ui32 lineSize = 4;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 8) {
const ui32 lineSize = 8;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 16) {
const ui32 lineSize = 16;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >
(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else if (meanSize <= 256) {
const ui32 lineSize = 32;
const ui32 blockSize = 256;
const ui32 segmentsPerBlock = blockSize / lineSize;
const ui32 numBlocks = CeilDivide(numSegments, segmentsPerBlock);
SegmentedReduceWarpPartPerSegmentImpl<T, blockSize, lineSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
} else {
const ui32 blockSize = 512;
const ui32 numBlocks = numSegments;
SegmentedReduceBlockPerSegmentImpl<T, blockSize> << < min(numBlocks, (ui32)TArchProps::MaxBlockCount()), blockSize, 0, stream >> >(input, beginOffsets, endOffsets, numSegments, output, numBlocks);
}
return cudaSuccess;
}
default: {
return cudaErrorNotYetImplemented;
}
}
} else {
switch (type) {
case EOperatorType::Sum: {
return cub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output, numSegments,
beginOffsets, endOffsets,
cub::Sum(),
T(),
stream);
}
case EOperatorType::Max: {
return cub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output,
numSegments,
beginOffsets, endOffsets,
cub::Max(),
T(),
stream);
}
case EOperatorType::Min: {
return cub::DeviceSegmentedReduce::Reduce(context.TempStorage, context.TempStorageSize,
input, output,
numSegments,
beginOffsets, endOffsets,
cub::Min(),
T(),
stream);
}
default: {
return cudaErrorNotYetImplemented;
}
}
}
}
#define REDUCE(Type) \
template cudaError_t Reduce<Type>(const Type* input, Type* output, ui32 size, EOperatorType type, TCubKernelContext& context, TCudaStream stream);
REDUCE(float)
REDUCE(ui32)
REDUCE(int)
template cudaError_t SegmentedReduce<float>(const float* input, ui32 size, const ui32* offsets, ui32 numSegments, float* output,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream);
template cudaError_t ReduceByKey<float, ui32>(const float* input, const ui32* keys, ui32 size,
float* output, ui32* outKeys, ui32* outputSize,
EOperatorType type,
TCubKernelContext& context,
TCudaStream stream);
}
|
a286edfce7e7db8d4296238ca527ceb64e2def84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <numeric>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <cufftMp.h>
#include <mpi.h>
#include "../common/error_checks.hpp"
#include "../common/generate_random.hpp"
#include "../common/scaling.cuh"
#include "../iterators/box_iterator.hpp"
/**
* This sample illustrates a basic use of cuFFTMp using the built-in, optimized, data distributions
* in the case of an R2C - C2R transform
*
* It performs
* - forward transform
* - printing and scaling of the entries
* - inverse transform
*/
void run_r2c_c2r(size_t nx, size_t ny, size_t nz, float* cpu_data, const int rank, const int size, MPI_Comm comm) {
// Initialize plans and stream
hipfftHandle plan_r2c = 0;
hipfftHandle plan_c2r = 0;
hipStream_t stream = nullptr;
CUDA_CHECK(hipStreamCreate(&stream));
CUFFT_CHECK(hipfftCreate(&plan_r2c));
CUFFT_CHECK(hipfftCreate(&plan_c2r));
// Attach the MPI communicator to the plans
CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm));
// Set the stream
CUFFT_CHECK(hipfftSetStream(plan_r2c, stream));
CUFFT_CHECK(hipfftSetStream(plan_c2r, stream));
// Make the plan
size_t workspace;
CUFFT_CHECK(hipfftMakePlan3d(plan_r2c, nx, ny, nz, HIPFFT_R2C, &workspace));
CUFFT_CHECK(hipfftMakePlan3d(plan_c2r, nx, ny, nz, HIPFFT_C2R, &workspace));
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE
cudaLibXtDesc *desc;
CUFFT_CHECK(cufftXtMalloc(plan_r2c, &desc, CUFFT_XT_FORMAT_INPLACE));
CUFFT_CHECK(cufftXtMemcpy(plan_r2c, (void*)desc, (void*)cpu_data, CUFFT_COPY_HOST_TO_DEVICE));
// Run R2C
CUFFT_CHECK(cufftXtExecDescriptor(plan_r2c, desc, desc, HIPFFT_FORWARD));
// At this point, data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED
// This applies an element-wise scaling function to the GPU data located in desc->descriptor->data[0]
auto [begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, HIPFFT_R2C,
rank, size, nx, ny, nz, (hipfftComplex*)desc->descriptor->data[0]);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( scaling_kernel), dim3(num_blocks), dim3(num_threads), 0, stream, begin_d, end_d, rank, size, nx, ny, nz);
// Run C2R
CUFFT_CHECK(cufftXtExecDescriptor(plan_c2r, desc, desc, HIPFFT_BACKWARD));
// Copy back to CPU and free
// Data is again distributed according to CUFFT_XT_FORMAT_INPLACE
CUDA_CHECK(hipStreamSynchronize(stream));
CUFFT_CHECK(cufftXtMemcpy(plan_c2r, (void*)cpu_data, (void*)desc, CUFFT_COPY_DEVICE_TO_HOST));
CUFFT_CHECK(cufftXtFree(desc));
CUFFT_CHECK(hipfftDestroy(plan_r2c));
CUFFT_CHECK(hipfftDestroy(plan_c2r));
CUDA_CHECK(hipStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(hipGetDeviceCount(&ndevices));
CUDA_CHECK(hipSetDevice(rank % ndevices));
printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices);
// Logical transform size
size_t nx = size; // any value >= size is OK
size_t ny = size; // any value >= size is OK
size_t nz = 2 * size; // needs to be even and >= size
// We start with Slabs distributed along X (X-Slabs)
// Ranks 0 ... (nx % size - 1) own 1 more element in the X dimension
// All ranks own all elements in the Y and Z dimensions
// The Z dimension has to be padded to accommodate the (nz / 2 + 1)
// complex numbers assuming an in-place data layout.
int ranks_with_onemore = nx % size;
size_t my_nx = (nx / size) + (rank < ranks_with_onemore ? 1 : 0);
size_t padded_nz = 2 * (nz / 2 + 1);
// Local, distributed, data
std::vector<float> data(my_nx * ny * padded_nz, 1.0);
generate_random(data, rank);
std::vector<float> ref = data;
// R2C + scaling + C2R
run_r2c_c2r(nx, ny, nz, data.data(), rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, HIPFFT_R2C, rank, size, nx, ny, nz));
MPI_Finalize();
return assess_error(error);
}
| a286edfce7e7db8d4296238ca527ceb64e2def84.cu | #include <numeric>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <cufftMp.h>
#include <mpi.h>
#include "../common/error_checks.hpp"
#include "../common/generate_random.hpp"
#include "../common/scaling.cuh"
#include "../iterators/box_iterator.hpp"
/**
* This sample illustrates a basic use of cuFFTMp using the built-in, optimized, data distributions
* in the case of an R2C - C2R transform
*
* It performs
* - forward transform
* - printing and scaling of the entries
* - inverse transform
*/
void run_r2c_c2r(size_t nx, size_t ny, size_t nz, float* cpu_data, const int rank, const int size, MPI_Comm comm) {
// Initialize plans and stream
cufftHandle plan_r2c = 0;
cufftHandle plan_c2r = 0;
cudaStream_t stream = nullptr;
CUDA_CHECK(cudaStreamCreate(&stream));
CUFFT_CHECK(cufftCreate(&plan_r2c));
CUFFT_CHECK(cufftCreate(&plan_c2r));
// Attach the MPI communicator to the plans
CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm));
// Set the stream
CUFFT_CHECK(cufftSetStream(plan_r2c, stream));
CUFFT_CHECK(cufftSetStream(plan_c2r, stream));
// Make the plan
size_t workspace;
CUFFT_CHECK(cufftMakePlan3d(plan_r2c, nx, ny, nz, CUFFT_R2C, &workspace));
CUFFT_CHECK(cufftMakePlan3d(plan_c2r, nx, ny, nz, CUFFT_C2R, &workspace));
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE
cudaLibXtDesc *desc;
CUFFT_CHECK(cufftXtMalloc(plan_r2c, &desc, CUFFT_XT_FORMAT_INPLACE));
CUFFT_CHECK(cufftXtMemcpy(plan_r2c, (void*)desc, (void*)cpu_data, CUFFT_COPY_HOST_TO_DEVICE));
// Run R2C
CUFFT_CHECK(cufftXtExecDescriptor(plan_r2c, desc, desc, CUFFT_FORWARD));
// At this point, data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED
// This applies an element-wise scaling function to the GPU data located in desc->descriptor->data[0]
auto [begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, CUFFT_R2C,
rank, size, nx, ny, nz, (cufftComplex*)desc->descriptor->data[0]);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
scaling_kernel<<<num_blocks, num_threads, 0, stream>>>(begin_d, end_d, rank, size, nx, ny, nz);
// Run C2R
CUFFT_CHECK(cufftXtExecDescriptor(plan_c2r, desc, desc, CUFFT_INVERSE));
// Copy back to CPU and free
// Data is again distributed according to CUFFT_XT_FORMAT_INPLACE
CUDA_CHECK(cudaStreamSynchronize(stream));
CUFFT_CHECK(cufftXtMemcpy(plan_c2r, (void*)cpu_data, (void*)desc, CUFFT_COPY_DEVICE_TO_HOST));
CUFFT_CHECK(cufftXtFree(desc));
CUFFT_CHECK(cufftDestroy(plan_r2c));
CUFFT_CHECK(cufftDestroy(plan_c2r));
CUDA_CHECK(cudaStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(cudaGetDeviceCount(&ndevices));
CUDA_CHECK(cudaSetDevice(rank % ndevices));
printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices);
// Logical transform size
size_t nx = size; // any value >= size is OK
size_t ny = size; // any value >= size is OK
size_t nz = 2 * size; // needs to be even and >= size
// We start with Slabs distributed along X (X-Slabs)
// Ranks 0 ... (nx % size - 1) own 1 more element in the X dimension
// All ranks own all elements in the Y and Z dimensions
// The Z dimension has to be padded to accommodate the (nz / 2 + 1)
// complex numbers assuming an in-place data layout.
int ranks_with_onemore = nx % size;
size_t my_nx = (nx / size) + (rank < ranks_with_onemore ? 1 : 0);
size_t padded_nz = 2 * (nz / 2 + 1);
// Local, distributed, data
std::vector<float> data(my_nx * ny * padded_nz, 1.0);
generate_random(data, rank);
std::vector<float> ref = data;
// R2C + scaling + C2R
run_r2c_c2r(nx, ny, nz, data.data(), rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, CUFFT_R2C, rank, size, nx, ny, nz));
MPI_Finalize();
return assess_error(error);
}
|
953e5f91eaec06013bb18d7224684eae4c5c0b8d.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include"matrix.h"
#include<time.h>
#include<sys/time.h>
#define ITER_CHECK 25 // status printed and convergence check every ITER_CHECK iterations
#define MAX_ITER 200 // max number of iterations
#define CONVERGE_THRESH 0 // set to zero to guarantee MAX_ITER iterations
#define TIMERS 10 // number of timers used in profiling (don't change)
//reduction tree parameters for:
int MN_params[] = {128, 32, 128, 8}; //M*N size reduction (whole matrix)
int N_params[] = {128,32,1,1}; //N size reductions (rows)
int M_params[] = {256,4,1,1}; //M size reductions (cols)
char *tname[] = {"total","sgemm","eps","vecdiv","vecmult","sumrows","sumcols","coldiv","rowdiv","check"};
void update_div(matrix W, matrix H, matrix X, const float thresh, const int max_iter, double* t, int verbose);
double get_time();
int main(int argc, char *argv[]){
//factor X into W*H
matrix W,H,X;
// read in matrix data:
// X - matrix to factorize
// W - initial W matrix
// H - initial H matrix
read_matrix(&W,"../W2.bin");
read_matrix(&X,"../X2.bin");
read_matrix(&H,"../H2.bin");
int max_iter;
if(argc > 1)
max_iter = atoi(argv[1]);
else
max_iter = MAX_ITER;
update_div(W,H,X,CONVERGE_THRESH,max_iter,NULL,1);
// copy results from GPU memory
//copy_matrix_from_device(&W);
//copy_matrix_from_device(&H);
// write results matrices to binary files
// (can be read with export_bin.m in Matlab)
write_matrix(W,"../Wout.bin");
write_matrix(H,"../Hout.bin");
destroy_matrix(&W);
destroy_matrix(&H);
destroy_matrix(&X);
return 0;
}
double get_time(){
//output time in seconds as a double
//the following line is required for function-wise timing to work,
//but it slows down overall execution time.
//comment out for faster execution
hipDeviceSynchronize();
struct timeval t;
gettimeofday(&t,NULL);
return (double)(t.tv_sec+t.tv_usec/1E6);
}
void update_div(matrix W0, matrix H0, matrix X0, const float thresh, const int max_iter, double *t,int verbose){
//run iterative multiplicative updates on W,H
hipblasInit();
const int M = W0.dim[0];
const int K = W0.dim[1];
const int N = H0.dim[1];
const int PAD_MULT = 32;
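// pad each matrix dimension up to the next multiple of PAD_MULT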
int M_padded = M;
if (M%PAD_MULT != 0)
M_padded = M + (PAD_MULT - (M % PAD_MULT));
int K_padded = K;
if (K%PAD_MULT != 0)
K_padded = K + (PAD_MULT - (K % PAD_MULT));
int N_padded = N;
if (N%PAD_MULT != 0)
N_padded = N + (PAD_MULT - (N % PAD_MULT));
//copy host matrices to device memory
copy_matrix_to_device(&W0);
copy_matrix_to_device(&H0);
copy_matrix_to_device(&X0);
//matrix to hold W*H
matrix WH0;
create_matrix_on_device(&WH0,M,N,0.0);
int i;
double t_array[TIMERS];
if(t==NULL)
t = t_array;
for(i=0;i<TIMERS;i++)
t[i] = 0;
// compute initial divergence and error
float diff,div,change,prev_diff,prev_div;
matrix_multiply_d(W0,H0,WH0);
diff = matrix_difference_norm_d(compute,X0,WH0,MN_params);
div = matrix_div_d(compute,X0,WH0,MN_params);
if(verbose)
printf("i: %4i, error: %6.4f, initial div: %8.4e\n",0,diff,div);
// free device memory for unpadded matrices
free_matrix_on_device(&W0);
free_matrix_on_device(&H0);
free_matrix_on_device(&X0);
free_matrix_on_device(&WH0);
//initialize temp matrices -----------------------
//matrix to hold X./(W*H+EPS)
matrix Z;
create_matrix_on_device(&Z,M_padded,N_padded,0.0);
//matrix to hold W'*Z
matrix WtZ;
create_matrix_on_device(&WtZ,K_padded,N_padded,0.0);
//matrix to hold Z*H'
matrix ZHt;
create_matrix_on_device(&ZHt,M_padded,K_padded,0.0);
//matrix to hold sum(W) [sum of cols of W]
matrix sumW;
create_matrix_on_device(&sumW,1,K_padded,0.0);
//matrix to hold sum(H,2) [sum of rows of H]
matrix sumH2;
create_matrix_on_device(&sumH2,K_padded,1,0.0);
//matrices to hold padded versions of matrices
matrix W;
create_matrix_on_device(&W,M_padded,K_padded,0.0);
matrix H;
create_matrix_on_device(&H,K_padded,N_padded,0.0);
matrix X;
create_matrix_on_device(&X,M_padded,N_padded,0.0);
// move host matrices to padded device memory
copy_matrix_to_device_padded(W0,W);
copy_matrix_to_device_padded(H0,H);
copy_matrix_to_device_padded(X0,X);
t[0] -= get_time();
for(i=0;i<max_iter;i++){
//check for convergence, print status
t[9] -= get_time();
if(i % ITER_CHECK == 0 && i != 0){
//copy_from_padded(W0,W);
//copy_from_padded(H0,H);
matrix_multiply_d(W,H,Z);
prev_diff = diff;
diff = matrix_difference_norm_d(compute,X,Z,MN_params);
//prev_div = div;
//div = matrix_div_d(compute,X0,WH0,128,32,128,4);
//change = (prev_div-div)/prev_div;
change = (prev_diff-diff)/prev_diff;
if(verbose)
printf("i: %4i, error: %6.4f, %% change: %8.5f\n",
i,diff,change);
if(change < thresh){
printf("converged\n");
break;
}
}
t[9] += get_time();
/* matlab algorithm
Z = X./(W*H+eps); H = H.*(W'*Z)./(repmat(sum(W)',1,F));
Z = X./(W*H+eps);
W = W.*(Z*H')./(repmat(sum(H,2)',N,1));
*/
//
// UPDATE H -----------------------------
//
//WH = W*H
t[1] -= get_time();
matrix_multiply_d(W,H,Z);
t[1] += get_time();
//WH = WH+EPS
t[2] -= get_time();
matrix_eps_d(Z,BLOCK_SIZE);
t[2] += get_time();
//Z = X./WH
t[3] -= get_time();
element_divide_d(X,Z,Z,BLOCK_SIZE);
t[3] += get_time();
//sum cols of W into row vector
t[6] -= get_time();
sum_cols_d(compute,W,sumW,M_params);
matrix_eps_d(sumW,32);
t[6] += get_time();
//convert sumW to col vector
sumW.dim[0] = sumW.dim[1];
sumW.dim[1] = 1;
//WtZ = W'*Z
t[1] -= get_time();
matrix_multiply_AtB_d(W,Z,WtZ);
t[1] += get_time();
//WtZ = WtZ./(repmat(sum(W)',1,H.dim[1])
//[element divide cols of WtZ by sumW']
t[7] -= get_time();
col_divide_d(WtZ,sumW,WtZ);
t[7] += get_time();
//H = H.*WtZ
t[4] -= get_time();
element_multiply_d(H,WtZ,H);
t[4] += get_time();
//
// UPDATE W ---------------------------
//
//WH = W*H
t[1] -= get_time();
matrix_multiply_d(W,H,Z);
t[1] += get_time();
//WH = WH+EPS
t[2] -= get_time();
matrix_eps_d(Z,BLOCK_SIZE);
t[2] += get_time();
//Z = X./WH
t[3] -= get_time();
element_divide_d(X,Z,Z,BLOCK_SIZE);
t[3] += get_time();
//sum rows of H into col vector
t[5] -= get_time();
sum_rows_d(compute,H,sumH2,N_params);
matrix_eps_d(sumH2,32);
t[5] += get_time();
//convert sumH2 to row vector
sumH2.dim[1] = sumH2.dim[0];
sumH2.dim[0] = 1;
//ZHt = Z*H'
t[1] -= get_time();
matrix_multiply_ABt_d(Z,H,ZHt);
t[1] += get_time();
//ZHt = ZHt./(repmat(sum(H,2)',W.dim[0],1)
//[element divide rows of ZHt by sumH2']
t[8] -= get_time();
row_divide_d(ZHt,sumH2,ZHt);
t[8] += get_time();
//W = W.*ZHt
t[4] -= get_time();
element_multiply_d(W,ZHt,W);
t[4] += get_time();
// ------------------------------------
//reset sumW to row vector
sumW.dim[1] = sumW.dim[0];
sumW.dim[0] = 1;
//reset sumH2 to col vector
sumH2.dim[0] = sumH2.dim[1];
sumH2.dim[1] = 1;
// ---------------------------------------
}
t[0] += get_time();
//reallocate unpadded device memory
allocate_matrix_on_device(&W0);
allocate_matrix_on_device(&H0);
//copy padded matrix to unpadded matrices
copy_from_padded(W0,W);
copy_from_padded(H0,H);
// free padded matrices
destroy_matrix(&W);
destroy_matrix(&H);
destroy_matrix(&X);
// free temp matrices
destroy_matrix(&Z);
destroy_matrix(&WtZ);
destroy_matrix(&ZHt);
destroy_matrix(&sumW);
destroy_matrix(&sumH2);
copy_matrix_to_device(&X0);
create_matrix_on_device(&WH0,M,N,0.0);
// copy device results to host memory
copy_matrix_from_device(&W0);
copy_matrix_from_device(&H0);
// evaluate final results
matrix_multiply_d(W0,H0,WH0);
diff = matrix_difference_norm_d(compute,X0,WH0,MN_params);
prev_div = div;
div = matrix_div_d(compute,X0,WH0,MN_params);
change = (prev_div-div)/prev_div;
if(verbose){
printf("i: %4i, error: %6.4f,\n\tfinal div: %8.4e, %% div change: %8.5f\n",
i,diff,div,change);
printf("\n");
for(i=0;i<TIMERS;i++)
printf("t[%i]: %8.3f (%6.2f %%) %s\n",i,t[i],t[i]/t[0]*100,tname[i]);
}
//clean up extra reduction memory
matrix_difference_norm_d(cleanup,X0,WH0,MN_params);
matrix_div_d(cleanup,X0,WH0,MN_params);
sum_cols_d(cleanup,W,sumW,M_params);
sum_rows_d(cleanup,H,sumH2,N_params);
// free device memory for unpadded matrices
free_matrix_on_device(&W0);
free_matrix_on_device(&H0);
free_matrix_on_device(&X0);
// free temp matrices
destroy_matrix(&WH0);
hipblasShutdown();
}
| 953e5f91eaec06013bb18d7224684eae4c5c0b8d.cu | #include<stdio.h>
#include<stdlib.h>
#include"matrix.h"
#include<time.h>
#include<sys/time.h>
#define ITER_CHECK 25 // status printed and convergence check every ITER_CHECK iterations
#define MAX_ITER 200 // max number of iterations
#define CONVERGE_THRESH 0 // set to zero to guarantee MAX_ITER iterations
#define TIMERS 10 // number of timers used in profiling (don't change)
//reduction tree parameters for:
int MN_params[] = {128, 32, 128, 8}; //M*N size reduction (whole matrix)
int N_params[] = {128,32,1,1}; //N size reductions (rows)
int M_params[] = {256,4,1,1}; //M size reductions (cols)
char *tname[] = {"total","sgemm","eps","vecdiv","vecmult","sumrows","sumcols","coldiv","rowdiv","check"};
void update_div(matrix W, matrix H, matrix X, const float thresh, const int max_iter, double* t, int verbose);
double get_time();
int main(int argc, char *argv[]){
//factor X into W*H
matrix W,H,X;
// read in matrix data:
// X - matrix to factorize
// W - initial W matrix
// H - initial H matrix
read_matrix(&W,"../W2.bin");
read_matrix(&X,"../X2.bin");
read_matrix(&H,"../H2.bin");
int max_iter;
if(argc > 1)
max_iter = atoi(argv[1]);
else
max_iter = MAX_ITER;
update_div(W,H,X,CONVERGE_THRESH,max_iter,NULL,1);
// copy results from GPU memory
//copy_matrix_from_device(&W);
//copy_matrix_from_device(&H);
// write results matrices to binary files
// (can be read with export_bin.m in Matlab)
write_matrix(W,"../Wout.bin");
write_matrix(H,"../Hout.bin");
destroy_matrix(&W);
destroy_matrix(&H);
destroy_matrix(&X);
return 0;
}
double get_time(){
//output time in seconds (with microsecond resolution)
//the following line is required for function-wise timing to work,
//but it slows down overall execution time.
//comment out for faster execution
cudaThreadSynchronize();
struct timeval t;
gettimeofday(&t,NULL);
return (double)(t.tv_sec+t.tv_usec/1E6);
}
void update_div(matrix W0, matrix H0, matrix X0, const float thresh, const int max_iter, double *t,int verbose){
//run iterative multiplicative updates on W,H
cublasInit();
const int M = W0.dim[0];
const int K = W0.dim[1];
const int N = H0.dim[1];
const int PAD_MULT = 32;
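// round M, K and N up to the next multiple of PAD_MULT so the padded device matrices tile evenly into the fixed-size blocks used by the kernels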
int M_padded = M;
if (M%PAD_MULT != 0)
M_padded = M + (PAD_MULT - (M % PAD_MULT));
int K_padded = K;
if (K%PAD_MULT != 0)
K_padded = K + (PAD_MULT - (K % PAD_MULT));
int N_padded = N;
if (N%PAD_MULT != 0)
N_padded = N + (PAD_MULT - (N % PAD_MULT));
//copy host matrices to device memory
copy_matrix_to_device(&W0);
copy_matrix_to_device(&H0);
copy_matrix_to_device(&X0);
//matrix to hold W*H
matrix WH0;
create_matrix_on_device(&WH0,M,N,0.0);
int i;
double t_array[TIMERS];
if(t==NULL)
t = t_array;
for(i=0;i<TIMERS;i++)
t[i] = 0;
// compute initial divergence and error
float diff,div,change,prev_diff,prev_div;
matrix_multiply_d(W0,H0,WH0);
diff = matrix_difference_norm_d(compute,X0,WH0,MN_params);
div = matrix_div_d(compute,X0,WH0,MN_params);
if(verbose)
printf("i: %4i, error: %6.4f, initial div: %8.4e\n",0,diff,div);
// free device memory for unpadded matrices
free_matrix_on_device(&W0);
free_matrix_on_device(&H0);
free_matrix_on_device(&X0);
free_matrix_on_device(&WH0);
//initialize temp matrices -----------------------
//matrix to hold X./(W*H+EPS)
matrix Z;
create_matrix_on_device(&Z,M_padded,N_padded,0.0);
//matrix to hold W'*Z
matrix WtZ;
create_matrix_on_device(&WtZ,K_padded,N_padded,0.0);
//matrix to hold Z*H'
matrix ZHt;
create_matrix_on_device(&ZHt,M_padded,K_padded,0.0);
//matrix to hold sum(W) [sum of cols of W]
matrix sumW;
create_matrix_on_device(&sumW,1,K_padded,0.0);
//matrix to hold sum(H,2) [sum of rows of H]
matrix sumH2;
create_matrix_on_device(&sumH2,K_padded,1,0.0);
//matrices to hold padded versions of matrices
matrix W;
create_matrix_on_device(&W,M_padded,K_padded,0.0);
matrix H;
create_matrix_on_device(&H,K_padded,N_padded,0.0);
matrix X;
create_matrix_on_device(&X,M_padded,N_padded,0.0);
// move host matrices to padded device memory
copy_matrix_to_device_padded(W0,W);
copy_matrix_to_device_padded(H0,H);
copy_matrix_to_device_padded(X0,X);
t[0] -= get_time();
for(i=0;i<max_iter;i++){
//check for convergence, print status
t[9] -= get_time();
if(i % ITER_CHECK == 0 && i != 0){
//copy_from_padded(W0,W);
//copy_from_padded(H0,H);
matrix_multiply_d(W,H,Z);
prev_diff = diff;
diff = matrix_difference_norm_d(compute,X,Z,MN_params);
//prev_div = div;
//div = matrix_div_d(compute,X0,WH0,128,32,128,4);
//change = (prev_div-div)/prev_div;
change = (prev_diff-diff)/prev_diff;
if(verbose)
printf("i: %4i, error: %6.4f, %% change: %8.5f\n",
i,diff,change);
if(change < thresh){
printf("converged\n");
break;
}
}
t[9] += get_time();
/* matlab algorithm
Z = X./(W*H+eps); H = H.*(W'*Z)./(repmat(sum(W)',1,F));
Z = X./(W*H+eps);
W = W.*(Z*H')./(repmat(sum(H,2)',N,1));
*/
//
// UPDATE H -----------------------------
//
//WH = W*H
t[1] -= get_time();
matrix_multiply_d(W,H,Z);
t[1] += get_time();
//WH = WH+EPS
t[2] -= get_time();
matrix_eps_d(Z,BLOCK_SIZE);
t[2] += get_time();
//Z = X./WH
t[3] -= get_time();
element_divide_d(X,Z,Z,BLOCK_SIZE);
t[3] += get_time();
//sum cols of W into row vector
t[6] -= get_time();
sum_cols_d(compute,W,sumW,M_params);
matrix_eps_d(sumW,32);
t[6] += get_time();
//convert sumW to col vector
sumW.dim[0] = sumW.dim[1];
sumW.dim[1] = 1;
//WtZ = W'*Z
t[1] -= get_time();
matrix_multiply_AtB_d(W,Z,WtZ);
t[1] += get_time();
//WtZ = WtZ./(repmat(sum(W)',1,H.dim[1]))
//[element divide cols of WtZ by sumW']
t[7] -= get_time();
col_divide_d(WtZ,sumW,WtZ);
t[7] += get_time();
//H = H.*WtZ
t[4] -= get_time();
element_multiply_d(H,WtZ,H);
t[4] += get_time();
//
// UPDATE W ---------------------------
//
//WH = W*H
t[1] -= get_time();
matrix_multiply_d(W,H,Z);
t[1] += get_time();
//WH = WH+EPS
t[2] -= get_time();
matrix_eps_d(Z,BLOCK_SIZE);
t[2] += get_time();
//Z = X./WH
t[3] -= get_time();
element_divide_d(X,Z,Z,BLOCK_SIZE);
t[3] += get_time();
//sum rows of H into col vector
t[5] -= get_time();
sum_rows_d(compute,H,sumH2,N_params);
matrix_eps_d(sumH2,32);
t[5] += get_time();
//convert sumH2 to row vector
sumH2.dim[1] = sumH2.dim[0];
sumH2.dim[0] = 1;
//ZHt = Z*H'
t[1] -= get_time();
matrix_multiply_ABt_d(Z,H,ZHt);
t[1] += get_time();
//ZHt = ZHt./(repmat(sum(H,2)',W.dim[0],1))
//[element divide rows of ZHt by sumH2']
t[8] -= get_time();
row_divide_d(ZHt,sumH2,ZHt);
t[8] += get_time();
//W = W.*ZHt
t[4] -= get_time();
element_multiply_d(W,ZHt,W);
t[4] += get_time();
// ------------------------------------
//reset sumW to row vector
sumW.dim[1] = sumW.dim[0];
sumW.dim[0] = 1;
//reset sumH2 to col vector
sumH2.dim[0] = sumH2.dim[1];
sumH2.dim[1] = 1;
// ---------------------------------------
}
t[0] += get_time();
//reallocate unpadded device memory
allocate_matrix_on_device(&W0);
allocate_matrix_on_device(&H0);
//copy padded matrix to unpadded matrices
copy_from_padded(W0,W);
copy_from_padded(H0,H);
// free padded matrices
destroy_matrix(&W);
destroy_matrix(&H);
destroy_matrix(&X);
// free temp matrices
destroy_matrix(&Z);
destroy_matrix(&WtZ);
destroy_matrix(&ZHt);
destroy_matrix(&sumW);
destroy_matrix(&sumH2);
copy_matrix_to_device(&X0);
create_matrix_on_device(&WH0,M,N,0.0);
// copy device results to host memory
copy_matrix_from_device(&W0);
copy_matrix_from_device(&H0);
// evaluate final results
matrix_multiply_d(W0,H0,WH0);
diff = matrix_difference_norm_d(compute,X0,WH0,MN_params);
prev_div = div;
div = matrix_div_d(compute,X0,WH0,MN_params);
change = (prev_div-div)/prev_div;
if(verbose){
printf("i: %4i, error: %6.4f,\n\tfinal div: %8.4e, %% div change: %8.5f\n",
i,diff,div,change);
printf("\n");
for(i=0;i<TIMERS;i++)
printf("t[%i]: %8.3f (%6.2f %%) %s\n",i,t[i],t[i]/t[0]*100,tname[i]);
}
//clean up extra reduction memory
matrix_difference_norm_d(cleanup,X0,WH0,MN_params);
matrix_div_d(cleanup,X0,WH0,MN_params);
sum_cols_d(cleanup,W,sumW,M_params);
sum_rows_d(cleanup,H,sumH2,N_params);
// free device memory for unpadded matrices
free_matrix_on_device(&W0);
free_matrix_on_device(&H0);
free_matrix_on_device(&X0);
// free temp matrices
destroy_matrix(&WH0);
cublasShutdown();
}
|
281654341f72f623925fce47fe2eb513627759f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parallelTest.cuh"
void testKeepMoleculeInBox()
{
double X = 10.0;
double Y = 10.0;
double Z = 10.0;
double temp = 100.0;
double maxTrans = .5;
int numOfAtoms = 3;
double cutoff = 9.0;
double maxRot = 15.0;
Environment enviro = createEnvironment(X, Y, Z, maxTrans, temp, numOfAtoms, cutoff, maxRot);
//test molecule completely outside of box.
Atom a1 = createAtom(1, 11, 10.3, 5);
Atom a2 = createAtom(2, 12.4, 1.2, 5);
Atom a3 = createAtom(3, 8.1, 2, 1.5);
Atom *atoms = (Atom *)malloc(sizeof(Atom) * numOfAtoms);
atoms[0] = a1;
atoms[1] = a2;
atoms[2] = a3;
Molecule molec;
molec.atoms = atoms;
molec.numOfAtoms = numOfAtoms;
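// expected results: keepMoleculeInBox shifts the molecule as a rigid unit (here by -8.1 in x and -1.2 in y) until it lies inside the 10x10x10 box, rather than wrapping atoms independently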
double expectedA1[] = {2.9, 9.1, 5};
double expectedA2[] = {4.3, 0, 5};
double expectedA3[] = {0, .8, 1.5};
double **answers = (double **)malloc(sizeof(double *) * numOfAtoms);
answers[0] = expectedA1;
answers[1] = expectedA2;
answers[2] = expectedA3;
keepMoleculeInBox(&molec, &enviro);
double precision = .0001;
for(int i = 0; i < numOfAtoms; i++)
{
double expectedX = answers[i][0];
double actualX = molec.atoms[i].x;
double expectedY = answers[i][1];
double actualY = molec.atoms[i].y;
double expectedZ = answers[i][2];
double actualZ = molec.atoms[i].z;
assert(fabs(expectedX - actualX) < precision);
assert(fabs(expectedY - actualY) < precision);
assert(fabs(expectedZ - actualZ) < precision);
}
cout << "keepMoleculeInBox() passed tests" << endl;
printAtoms(molec.atoms, 3);
}
void setupGetIndexTest()
{
int numberOfBlocks = 3;
int threadsPerBlock = 2;
int totalTests = numberOfBlocks * threadsPerBlock;
int *xValues;
int *yValues;
int *yValues_device;
int *xValues_device;
size_t xSize = totalTests * sizeof(int);
yValues = (int *) malloc(xSize);
xValues = (int *)malloc(xSize);
hipMalloc((void **) &yValues_device, xSize);
hipMalloc((void **) &xValues_device, xSize);
hipLaunchKernelGGL(( testGetXKernel) , dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, xValues_device, totalTests);
hipMemcpy(xValues, xValues_device, xSize, hipMemcpyDeviceToHost);
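// getXFromIndex/getYFromIndex enumerate strictly lower-triangular atom pairs: index 0 -> (x=1,y=0), 1 -> (2,0), 2 -> (2,1), 3 -> (3,0), ...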
assert(xValues[0] == 1);
assert(xValues[1] == 2);
assert(xValues[2] == 2);
assert(xValues[3] == 3);
assert(xValues[4] == 3);
assert(xValues[5] == 3);
printf("getXFromIndex Correct\n");
//test getYFromIndex
hipLaunchKernelGGL(( testGetYKernel) , dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, xValues_device,
yValues_device, totalTests);
hipMemcpy(yValues, yValues_device, xSize, hipMemcpyDeviceToHost);
assert(yValues[0] == 0);
assert(yValues[1] == 0);
assert(yValues[2] == 1);
assert(yValues[3] == 0);
assert(yValues[4] == 1);
assert(yValues[5] == 2);
printf("getYFromIndex Correct.\n");
hipFree(xValues_device);
hipFree(yValues_device);
free(yValues);
free(xValues);
}
bool compareDouble(double a, double b, double limit)
{
if(fabs(a - b) / fabs(b) < limit)
{
return true;
}
else
{
return false;
}
}
void setupMakePeriodic()
{
srand(time(NULL));
int numberOfTests = 128;
double *box;
double *inputs_host;
double *inputs_device;
double *outputs_host;
double *dev_box;
size_t inputSize = sizeof(double) * numberOfTests;
box = (double *) malloc(sizeof(double));
*box = 10.0;
inputs_host = (double *) malloc(inputSize);
outputs_host = (double *) malloc(inputSize);
hipMalloc((void **) &inputs_device, inputSize);
hipMalloc((void **) &dev_box, sizeof(double));
//generate random numbers
for(int i = 0; i < numberOfTests; i++)
{
inputs_host[i] = ((double) (rand() % 100));
}
//copy data to device
hipMemcpy(inputs_device, inputs_host, inputSize, hipMemcpyHostToDevice);
hipMemcpy(dev_box, box, sizeof(double), hipMemcpyHostToDevice);
int threadsPerBlock = numberOfTests / 2;
int blocks = numberOfTests / threadsPerBlock + (numberOfTests % threadsPerBlock == 0 ? 0 : 1);
hipLaunchKernelGGL(( testMakePeriodicKernel) , dim3(blocks), dim3(threadsPerBlock) , 0, 0, inputs_device, dev_box, numberOfTests);
hipMemcpy(outputs_host, inputs_device, inputSize, hipMemcpyDeviceToHost);
//check that values are the same as known correct function
for(int i = 0; i < numberOfTests; i++)
{
double test_output = make_periodic(inputs_host[i], *box);
assert(outputs_host[i] == test_output);
}
printf("makePeriodic passed Tests\n");
free(inputs_host);
free(outputs_host);
hipFree(inputs_device);
}
void testWrapBox()
{
srand(time(NULL));
int numberOfTests = 128;
double box;
double *testDoubles;
size_t inputSize = sizeof(double) * numberOfTests;
box = 10.f;
testDoubles = (double *) malloc(inputSize);
//generate random numbers
for(int i = 0; i < numberOfTests; i++)
{
testDoubles[i] = (double) rand() / (double) RAND_MAX;
}
//check that values are the same as known correct function
for(int i = 0; i < numberOfTests; i++)
{
double test_output = wrap_into_box(testDoubles[i], box);
assert(wrapBox(testDoubles[i], box) == test_output);
}
printf("wrapBox passed Tests\n");
free(testDoubles);
}
void setupCalc_lj()
{
double kryptonSigma = 3.624;
double kryptonEpsilon = 0.317;
int numberOfAtoms = 2;
Atom *atoms = new Atom[numberOfAtoms];
double *energy = (double *) malloc(sizeof(double));
*energy = 1000.f;
Atom *atoms_device;
Environment *enviro_device;
double *energy_device;
hipMalloc((void **) &atoms_device, sizeof(Atom) * numberOfAtoms);
hipMalloc((void **) &enviro_device, sizeof(Environment));
hipMalloc((void **) &energy_device, sizeof(double));
Environment stableEnviro = createEnvironment(10, 10, 10, .5, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
generatePoints(atoms, enviro);
atoms[0].sigma = kryptonSigma;
atoms[0].epsilon = kryptonEpsilon;
atoms[1].sigma = kryptonSigma;
atoms[1].epsilon = kryptonEpsilon;
hipMemcpy(atoms_device, atoms, sizeof(Atom) * numberOfAtoms, hipMemcpyHostToDevice);
hipMemcpy(enviro_device, enviro, sizeof(Environment), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( testCalcLJ), dim3(1),dim3(1), 0, 0, atoms_device, enviro_device, energy_device);
hipMemcpy(energy, energy_device, sizeof(double), hipMemcpyDeviceToHost);
double baseEnergy = calculate_energy(atoms, enviro);
assert((int)(*energy * pow(10.f, 6.f)) == (int)( baseEnergy * pow(10.f,6.f)));
printf("\nparallelEnergy = %2.10f\nlinearEnergy = %2.10f\n", *energy, baseEnergy);
printf("Calc_lj is correct\n");
free(atoms);
free(energy);
hipFree(atoms_device);
hipFree(enviro_device);
hipFree(energy_device);
}
void testGeneratePoints()
{
//init atoms, environment
int numberOfAtoms = 1000;
Atom *atoms = (Atom *) malloc(numberOfAtoms * sizeof(Atom));
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, 0, 0, 0);
}
Environment enviro = createEnvironment(10.0, 20.0, 35.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
generatePoints(atoms, &enviro);
//assert that all atoms positions are in range of the box
for (int i = 0; i < numberOfAtoms; i++)
{
double dim_x = atoms[i].x;
double dim_y = atoms[i].y;
double dim_z = atoms[i].z;
assert(dim_x >= 0 && dim_x <= (enviro.x) &&
dim_y >= 0 && dim_y <= (enviro.y) &&
dim_z >= 0 && dim_z <= (enviro.z));
}
printf("testGeneratePoints (atoms) successful.\n");
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, i % 2, i % 3, i % 4);
}
int numberOfMolecules = 250;
Molecule *molecules = (Molecule *)malloc(sizeof(Molecule)*numberOfMolecules);
for (int i = 0; i < numberOfMolecules; i++)
{
Bond *blankBonds = NULL;
Angle *blankAngles = NULL;
Dihedral *blankDihedrals = NULL;
int atomCount = numberOfAtoms / numberOfMolecules;
Atom *molAtoms = (Atom *) malloc(sizeof(Atom)*atomCount);
for (int j = 0; j < atomCount; j++)
{
molAtoms[j] = atoms[i*atomCount + j];
}
molecules[i] = createMolecule(-1, molAtoms, blankAngles, blankBonds, blankDihedrals, atomCount, 0, 0, 0);
}
generatePoints(molecules, &enviro);
for (int i = 0; i < numberOfMolecules; i++)
{
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
double dim_x = molecules[i].atoms[j].x;
double dim_y = molecules[i].atoms[j].y;
double dim_z = molecules[i].atoms[j].z;
assert(dim_x >= 0 && dim_x <= (enviro.x) &&
dim_y >= 0 && dim_y <= (enviro.y) &&
dim_z >= 0 && dim_z <= (enviro.z));
}
}
printf("testGeneratePoints (molecules) successful.\n");
free(atoms);
free(molecules);
}
void testCalcEnergy()
{
// the sigma value of nitrogen
double nSigma = 3.250;
// the epsilon value of nitrogen
double nEpsilon = 0.17;
// charge of nitrogen
double nCharge = -0.850;
struct timeval le_tvBegin, le_tvEnd, pl_tvBegin, pl_tvEnd;
//Generate environment and atoms
int numberOfAtoms = 1000;
Environment stableEnviro = createEnvironment(5.0, 10.0, 15.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
Atom *atoms = new Atom[numberOfAtoms];
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, nSigma, nEpsilon);
atoms[i].charge = nCharge;
}
generatePoints(atoms, enviro);
//calculate energy linearly
gettimeofday(&le_tvBegin,NULL); //start clock for execution time
double te_linear = calculate_energy(atoms, enviro);
gettimeofday(&le_tvEnd,NULL); //stop clock for execution time
long le_runTime = timevaldiff(&le_tvBegin,&le_tvEnd); //get difference in time in milli seconds
//calculate energy in parallel
gettimeofday(&pl_tvBegin,NULL); //start clock for execution time
double te_parallel = calcEnergyWrapper(atoms, enviro);
gettimeofday(&pl_tvEnd,NULL); //stop clock for execution time
long pl_runTime = timevaldiff(&pl_tvBegin,&pl_tvEnd); //get difference in time in milli seconds
//Print out Results
printf("Number of elements: %d\n", numberOfAtoms);
printf("Linear Total Energy: %f \n", te_linear);
printf("In %lu ms\n", le_runTime);
printf("Parallel Total Energy: %f \n", te_parallel);
printf("In %lu ms\n", pl_runTime);
assert(compareDouble(te_linear, te_parallel, .05));
printf("testCalcEnergy successful.\n");
}
void testCalcEnergyWithMolecules()
{
// the sigma value of nitrogen
double nSigma = 3.250;
// the epsilon value of nitrogen
double nEpsilon = 0.17;
// charge of nitrogen
double nCharge = -0.850;
// the sigma value of krypton used in the LJ simulation
double kryptonSigma = 3.624;
// the epsilon value of krypton used in the LJ simulation
double kryptonEpsilon = 0.317;
// charge of krypton
double kryptonCharge = 0.0;
struct timeval le_tvBegin, le_tvEnd, pl_tvBegin, pl_tvEnd;
//Generate environment and atoms
int numberOfAtoms = 500;
Environment stableEnviro = createEnvironment(5.0, 10.0, 15.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
Atom *atoms = new Atom[numberOfAtoms];
for (int i = 0; i < numberOfAtoms; i++)
{
if ((i % 5) < 3)
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, kryptonSigma, kryptonEpsilon);
atoms[i].charge = kryptonCharge;
}
else
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, nSigma, nEpsilon);
atoms[i].charge = nCharge;
}
}
enviro->numOfMolecules = 100;
Molecule *molecules;
molecules = (Molecule *)malloc(sizeof(Molecule) * numberOfAtoms);
int atomCount = 0;
for(int i = 0; i < enviro->numOfMolecules; i++)
{
molecules[i].numOfAtoms = 5;
molecules[i].atoms = (Atom *)malloc(sizeof(Atom) * 5);
molecules[i].id = atomCount;
Hop *hops = (Hop *)malloc(sizeof(Hop) * 2);
hops[0] = createHop(atomCount, atomCount+3, 3);
hops[1] = createHop(atomCount, atomCount+4, 4);
molecules[i].hops = hops;
molecules[i].numOfHops = 2;
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
molecules[i].atoms[j] = atoms[atomCount];
atomCount++;
}
}
generatePoints(molecules, enviro);
for (int i = 0; i < enviro->numOfMolecules; i++)
{
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
atoms[i * 5 + j] = molecules[i].atoms[j];
}
}
//calculate energy linearly
gettimeofday(&le_tvBegin,NULL); //start clock for execution time
double te_linear = calculate_energy(atoms, enviro, molecules);
gettimeofday(&le_tvEnd,NULL); //stop clock for execution time
long le_runTime = timevaldiff(&le_tvBegin,&le_tvEnd); //get difference in time in milli seconds
//calculate energy in parallel
gettimeofday(&pl_tvBegin,NULL); //start clock for execution time
double te_parallel = calcEnergyWrapper(molecules, enviro);
gettimeofday(&pl_tvEnd,NULL); //stop clock for execution time
long pl_runTime = timevaldiff(&pl_tvBegin,&pl_tvEnd); //get difference in time in milli seconds
//Print out Results
printf("Number of elements: %d\n", numberOfAtoms);
printf("Linear Total Energy: %f \n", te_linear);
printf("In %lu ms\n", le_runTime);
printf("Parallel Total Energy: %f \n", te_parallel);
printf("In %lu ms\n", pl_runTime);
assert(compareDouble(te_linear, te_parallel, .05));
printf("testCalcEnergyWithMolecules successful.\n");
}
void testGetMoleculeFromIDWrapper()
{
int numberOfAtoms = 11;
int numberOfMolecules = 3;
Atom *atoms;
DeviceMolecule *molecules;
Environment enviro;
int *answers;
Atom *atoms_device;
DeviceMolecule *molecules_device;
int *answers_device;
enviro.numOfAtoms = numberOfAtoms;
enviro.numOfMolecules = numberOfMolecules;
atoms = (Atom *)malloc(sizeof(Atom) * numberOfAtoms);
molecules = (DeviceMolecule *)malloc(sizeof(DeviceMolecule) *numberOfMolecules);
answers = (int *)malloc(sizeof(int) * numberOfAtoms);
hipMalloc((void **) &atoms_device, sizeof(Atom) * numberOfAtoms);
hipMalloc((void **) &molecules_device, sizeof(DeviceMolecule) * numberOfMolecules);
hipMalloc((void **) &answers_device, sizeof(int) * numberOfAtoms);
enviro.numOfAtoms = numberOfAtoms;
enviro.numOfMolecules = numberOfMolecules;
for(int i = 0; i < numberOfAtoms; i++)
{
atoms[i].id = i;
}
molecules[0].id = 0;
molecules[1].id = 2;
molecules[2].id = 6;
hipMemcpy(atoms_device, atoms, sizeof(Atom) * numberOfAtoms, hipMemcpyHostToDevice);
hipMemcpy(molecules_device, molecules, sizeof(DeviceMolecule) * numberOfMolecules, hipMemcpyHostToDevice);
int numberOfBlocks = 1;
int threadsPerBlock = 128;
hipLaunchKernelGGL(( testGetMoleculeFromID), dim3(numberOfBlocks),dim3(threadsPerBlock), 0, 0, atoms_device, molecules_device, enviro, numberOfAtoms, answers_device);
hipMemcpy(answers, answers_device, sizeof(int) * numberOfAtoms, hipMemcpyDeviceToHost);
assert(answers[0] == 0);
assert(answers[1] == 0);
assert(answers[2] == 2);
assert(answers[3] == 2);
assert(answers[4] == 2);
assert(answers[5] == 2);
assert(answers[6] == 6);
assert(answers[7] == 6);
assert(answers[8] == 6);
assert(answers[9] == 6);
assert(answers[10] == 6);
printf("getMoleculeFromID passed tests\n");
free(atoms);
free(molecules);
free(answers);
hipFree(atoms_device);
hipFree(molecules_device);
hipFree(answers_device);
}
void testCalcBlendingWrapper()
{
double *d1, *d2, *d1_device, *d2_device, *answers, *answers_device;
int numberOfTests = 5;
size_t doubleSize = sizeof(double) * numberOfTests;
d1 = (double *)malloc(doubleSize);
d2 = (double *)malloc(doubleSize);
answers = (double *)malloc(doubleSize);
hipMalloc((void **) &d1_device, doubleSize);
hipMalloc((void **) &d2_device, doubleSize);
hipMalloc((void **) &answers_device, doubleSize);
d1[0] = 0.f;
d2[0] = 0.f;
d1[1] = 4.5;
d2[1] = 2.32;
d1[2] = 52.34;
d2[2] = 5.f;
d1[3] = 1.f;
d2[3] = 7.f;
d1[4] = 34.56;
d2[4] = 12.7;
hipMemcpy(d1_device, d1, doubleSize, hipMemcpyHostToDevice);
hipMemcpy(d2_device, d2, doubleSize, hipMemcpyHostToDevice);
int blocks = 1;
int threadsPerBlock = 64;
hipLaunchKernelGGL(( testCalcBlending) , dim3(blocks), dim3(threadsPerBlock), 0, 0, d1_device, d2_device, answers_device, numberOfTests);
hipMemcpy(answers, answers_device, doubleSize, hipMemcpyDeviceToHost);
for(int i = 0 ; i < numberOfTests; i++)
{
double expected = sqrt(d1[i] * d2[i]);
assert(fabs(answers[i] - expected) / expected < 0.01 || answers[i] == expected);
}
printf("calcBlending passed tests.\n");
free(d1);
free(d2);
free(answers);
hipFree(d1_device);
hipFree(d2_device);
hipFree(answers_device);
}
void testGetFValueWrapper()
{
Environment *enviro, *dev_enviro;
DeviceMolecule *molecules, *dev_molecules;
Atom *mol1_atoms, *mol2_atoms, *atom1List, *atom2List, *dev_atom1List, *dev_atom2List;
Hop *dev_hops;
double *fvalues, *dev_fvalues;
int numberOfTests = 5;
Environment stable_enviro = createEnvironment(5.0,5.0,5.0,1.0,270.0,5,9.0,15.0);
enviro = &stable_enviro;
mol1_atoms = (Atom *)malloc(sizeof(Atom)*5);
mol2_atoms = (Atom *)malloc(sizeof(Atom));
atom1List = (Atom *)malloc(sizeof(Atom)*numberOfTests);
atom2List = (Atom *)malloc(sizeof(Atom)*numberOfTests);
fvalues = (double *)malloc(sizeof(double)*numberOfTests);
for (int i = 0; i < 5; i++)
{
mol1_atoms[i] = createAtom(i,1.0,1.0,1.0);
}
for (int i = 0; i < 5; i++)
{
atom1List[i] = mol1_atoms[0];
if (i < 4)
atom2List[i] = mol1_atoms[i+1];
}
mol2_atoms[0] = createAtom(5,1.0,1.0,1.0);
atom2List[4] = mol2_atoms[0];
Hop* hops = (Hop *)malloc(sizeof(Hop)*2);
hops[0] = createHop(0,3,3);
hops[1] = createHop(0,4,4);
molecules = (DeviceMolecule *)malloc(sizeof(DeviceMolecule)*2);
molecules[0] = createDeviceMolecule(0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 2);
molecules[1] = createDeviceMolecule(5, 5, 1, 0, 0, 0, 0, 0, 0, 2, 0);
hipMalloc((void **) &dev_enviro, sizeof(Environment));
hipMalloc((void **) &dev_molecules, sizeof(DeviceMolecule)*2);
hipMalloc((void **) &dev_atom1List, sizeof(Atom)*numberOfTests);
hipMalloc((void **) &dev_atom2List, sizeof(Atom)*numberOfTests);
hipMalloc((void **) &dev_fvalues, sizeof(double)*numberOfTests);
hipMalloc((void **) &dev_hops, sizeof(Hop)*2);
hipMemcpy(dev_enviro, enviro, sizeof(Environment), hipMemcpyHostToDevice);
hipMemcpy(dev_molecules, molecules, sizeof(DeviceMolecule)*2, hipMemcpyHostToDevice);
hipMemcpy(dev_atom1List, atom1List, sizeof(Atom)*numberOfTests, hipMemcpyHostToDevice);
hipMemcpy(dev_atom2List, atom2List, sizeof(Atom)*numberOfTests, hipMemcpyHostToDevice);
hipMemcpy(dev_hops, hops, sizeof(Hop)*2, hipMemcpyHostToDevice);
int blocks = 1;
int threadsPerBlock = 64;
hipLaunchKernelGGL(( testGetFValue) , dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_atom1List, dev_atom2List, dev_molecules, dev_enviro, dev_fvalues, numberOfTests, dev_hops);
hipMemcpy(fvalues, dev_fvalues, sizeof(double)*numberOfTests, hipMemcpyDeviceToHost);
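// expected scale factors: 0.0 for intramolecular pairs fewer than 3 bonds apart, 0.5 for pairs exactly 3 hops apart (1-4 interactions), 1.0 for more distant pairs or atoms in different molecules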
double *expected = (double *)malloc(sizeof(double)*numberOfTests);
expected[0] = 0.0;
expected[1] = 0.0;
expected[2] = 0.5;
expected[3] = 1.0;
expected[4] = 1.0;
for(int i = 0 ; i < numberOfTests; i++)
{
assert(expected[i] == fvalues[i]);
}
printf("testGetFValue passed tests.\n");
free(mol1_atoms);
free(mol2_atoms);
free(atom1List);
free(atom2List);
free(fvalues);
free(molecules);
free(hops);
hipFree(dev_enviro);
hipFree(dev_molecules);
hipFree(dev_atom1List);
hipFree(dev_atom2List);
hipFree(dev_fvalues);
hipFree(dev_hops);
}
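// rotate toRotate about the pivot by the full allowed angle about each axis in turn, giving an upper bound on how far rotateMolecule may move an atom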
Atom findMaxRotation(Atom pivot, Atom toRotate, double rotation){
toRotate.x -= pivot.x;
toRotate.y -= pivot.y;
toRotate.z -= pivot.z;
rotateAboutX(toRotate, rotation);
rotateAboutY(toRotate, rotation);
rotateAboutZ(toRotate, rotation);
toRotate.x += pivot.x;
toRotate.y += pivot.y;
toRotate.z += pivot.z;
return toRotate;
}
void testRotateMolecule()
{
srand(time(NULL));
//Testing on a molecule that is not totally unlike water
double bondDistance = 0.9584; // angstroms
double maxRotation = 10.0; // degrees
int numOfAtoms = 3;
int numOfAngles = 1;
int numOfBonds = 2;
int numOfDihedrals = 0;
Atom oxygen = createAtom(1, 0, 0, 0);
Atom hydrogen1 = createAtom(2, 0, bondDistance, 0);
Atom hydrogen2 = createAtom(3, bondDistance, 0, 0);
Atom *atoms = (Atom *)malloc(sizeof(Atom) * 3);
atoms[0] = oxygen;
atoms[1] = hydrogen1;
atoms[2] = hydrogen2;
vector<Atom> atomVector;
atomVector.push_back(oxygen);
atomVector.push_back(hydrogen1);
atomVector.push_back(hydrogen2);
Bond b1 = createBond(1,2, bondDistance, false);
Bond b2 = createBond(1,3, bondDistance, false);
Bond *bonds = (Bond *)malloc(sizeof(Bond) * 2);
bonds[0] = b1;
bonds[1] = b2;
Angle a1 = createAngle(2,3,90,false);
Angle *angles = (Angle *)malloc(sizeof(Angle));
angles[0] = a1;
Dihedral *dihedrals = (Dihedral *)malloc(sizeof(Dihedral) * 0);
Molecule molec;
molec = createMolecule(1, atoms, angles, bonds, dihedrals,
numOfAtoms, numOfAngles, numOfBonds, numOfDihedrals);
int testNumber = 10;
printf("Testing rotateMolecule\n");
for(int i = 0 ; i < testNumber; i++)
{
//pick atom to rotate about. Cycle through all of them
Atom toRotate = atoms[1];
rotateMolecule(molec, toRotate, maxRotation);
//test that rotation is within limit
Atom newAtom1 = atoms[2];
Atom origAtom1 = getAtom(atomVector, newAtom1.id);
double angleChange1 = getAngle(newAtom1, toRotate, origAtom1);
printf("Atom1 angle change = %f\n", angleChange1);
Atom newAtom2 = atoms[0];
Atom origAtom2 = getAtom(atomVector, newAtom2.id);
double angleChange2 = getAngle(newAtom2, toRotate, origAtom2);
printf("Atom2 angle change = %f\n", angleChange2);
Atom maxAtom1 = findMaxRotation(toRotate, newAtom1, maxRotation);
Atom maxAtom2 = findMaxRotation(toRotate, newAtom2, maxRotation);
double maxAngle1 = getAngle(maxAtom1, toRotate, origAtom1);
double maxAngle2 = getAngle(maxAtom2, toRotate, origAtom2);
assert(angleChange1 <= maxAngle1);
assert(angleChange2 <= maxAngle2);
//reset atoms
molec.atoms[0] = oxygen;
molec.atoms[1] = hydrogen1;
molec.atoms[2] = hydrogen2;
}
printf("rotateMolecule passed tests.\n");
}
void testCalcChargeWrapper()
{
printf("Testing calcCharge()\n");
int numberOfTests = 10;
// data on the host
Atom *atoms1_h;
Atom *atoms2_h;
Environment *enviro_h;
double *answers_h;
// data on the device
Atom *atoms1_d;
Atom *atoms2_d;
Environment *enviro_d;
double *answers_d;
// get sizes of data
size_t atomSize = sizeof(Atom) * numberOfTests;
size_t enviroSize = sizeof(Environment);
size_t answerSize = sizeof(double) * numberOfTests;
// mallocate on host
atoms1_h = (Atom *)malloc(atomSize);
atoms2_h = (Atom *)malloc(atomSize);
enviro_h = (Environment *)malloc(enviroSize);
answers_h = (double *) malloc(answerSize);
// mallocate on device
hipMalloc((void **) &atoms1_d, atomSize);
hipMalloc((void **) &atoms2_d, atomSize);
hipMalloc((void **) &enviro_d, enviroSize);
hipMalloc((void **) &answers_d, answerSize);
double xSize = 10;
double ySize = xSize;
double zSize = ySize;
//generate atoms for test
srand(time(NULL));
for(int i = 0; i < numberOfTests; i++)
{
atoms1_h[i].x = (double) rand() / (double) RAND_MAX * xSize;
atoms2_h[i].x = (double) rand() / (double) RAND_MAX * xSize;
atoms1_h[i].y = (double) rand() / (double) RAND_MAX * ySize;
atoms2_h[i].y = (double) rand() / (double) RAND_MAX * ySize;
atoms1_h[i].z = (double) rand() / (double) RAND_MAX * zSize;
atoms2_h[i].z = (double) rand() / (double) RAND_MAX * zSize;
atoms1_h[i].charge = (double) rand() / (double) RAND_MAX * 2 - 1;
atoms2_h[i].charge = (double) rand() / (double) RAND_MAX * 2 - 1;
}
enviro_h->x = xSize;
enviro_h->y = ySize;
enviro_h->z = zSize;
enviro_h->numOfAtoms = numberOfTests;
//transfer data to the device
hipMemcpy(atoms1_d, atoms1_h, atomSize, hipMemcpyHostToDevice);
hipMemcpy(atoms2_d, atoms2_h, atomSize, hipMemcpyHostToDevice);
hipMemcpy(enviro_d, enviro_h, enviroSize, hipMemcpyHostToDevice);
//call test function
int numOfBlocks = 1;
int threadsPerBlock = 64;
hipLaunchKernelGGL(( testCalcCharge), dim3(numOfBlocks), dim3(threadsPerBlock), 0, 0, atoms1_d, atoms2_d, answers_d, enviro_d);
//transfer answers from device to host
hipMemcpy(answers_h, answers_d, answerSize, hipMemcpyDeviceToHost);
//TEST ANSWERS
for(int i = 0; i < numberOfTests; i++)
{
double expected = calc_charge(atoms1_h[i], atoms2_h[i], *enviro_h);
assert((expected - answers_h[i]) / expected < .01);
}
printf("calcCharge passed tests.\n");
free(atoms1_h);
free(atoms2_h);
free(enviro_h);
free(answers_h);
hipFree(atoms1_d);
hipFree(atoms2_d);
hipFree(enviro_d);
hipFree(answers_d);
}
int main()
{
testCopyMolecules();
testKeepMoleculeInBox();
testRotateMolecule();
testCalcChargeWrapper();
testCalcBlendingWrapper();
testGetMoleculeFromIDWrapper();
testWrapBox();
setupCalc_lj();
setupGetIndexTest();
setupMakePeriodic();
testGeneratePoints();
testCalcEnergy();
testCalcEnergyWithMolecules();
return 0;
}
| 281654341f72f623925fce47fe2eb513627759f8.cu | #include "parallelTest.cuh"
void testKeepMoleculeInBox()
{
double X = 10.0;
double Y = 10.0;
double Z = 10.0;
double temp = 100.0;
double maxTrans = .5;
int numOfAtoms = 3;
double cutoff = 9.0;
double maxRot = 15.0;
Environment enviro = createEnvironment(X, Y, Z, maxTrans, temp, numOfAtoms, cutoff, maxRot);
//test molecule compeletely outside of box.
Atom a1 = createAtom(1, 11, 10.3, 5);
Atom a2 = createAtom(2, 12.4, 1.2, 5);
Atom a3 = createAtom(3, 8.1, 2, 1.5);
Atom *atoms = (Atom *)malloc(sizeof(Atom) * numOfAtoms);
atoms[0] = a1;
atoms[1] = a2;
atoms[2] = a3;
Molecule molec;
molec.atoms = atoms;
molec.numOfAtoms = numOfAtoms;
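// expected results: keepMoleculeInBox shifts the molecule as a rigid unit (here by -8.1 in x and -1.2 in y) until it lies inside the 10x10x10 box, rather than wrapping atoms independently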
double expectedA1[] = {2.9, 9.1, 5};
double expectedA2[] = {4.3, 0, 5};
double expectedA3[] = {0, .8, 1.5};
double **answers = (double **)malloc(sizeof(double *) * numOfAtoms);
answers[0] = expectedA1;
answers[1] = expectedA2;
answers[2] = expectedA3;
keepMoleculeInBox(&molec, &enviro);
double precision = .0001;
for(int i = 0; i < numOfAtoms; i++)
{
double expectedX = answers[i][0];
double actualX = molec.atoms[i].x;
double expectedY = answers[i][1];
double actualY = molec.atoms[i].y;
double expectedZ = answers[i][2];
double actualZ = molec.atoms[i].z;
assert(fabs(expectedX - actualX) < precision);
assert(fabs(expectedY - actualY) < precision);
assert(fabs(expectedZ - actualZ) < precision);
}
cout << "keepMoleculeInBox() passed tests" << endl;
printAtoms(molec.atoms, 3);
}
void setupGetIndexTest()
{
int numberOfBlocks = 3;
int threadsPerBlock = 2;
int totalTests = numberOfBlocks * threadsPerBlock;
int *xValues;
int *yValues;
int *yValues_device;
int *xValues_device;
size_t xSize = totalTests * sizeof(int);
yValues = (int *) malloc(xSize);
xValues = (int *)malloc(xSize);
cudaMalloc((void **) &yValues_device, xSize);
cudaMalloc((void **) &xValues_device, xSize);
testGetXKernel <<<numberOfBlocks, threadsPerBlock>>>(xValues_device, totalTests);
cudaMemcpy(xValues, xValues_device, xSize, cudaMemcpyDeviceToHost);
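// getXFromIndex/getYFromIndex enumerate strictly lower-triangular atom pairs: index 0 -> (x=1,y=0), 1 -> (2,0), 2 -> (2,1), 3 -> (3,0), ...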
assert(xValues[0] == 1);
assert(xValues[1] == 2);
assert(xValues[2] == 2);
assert(xValues[3] == 3);
assert(xValues[4] == 3);
assert(xValues[5] == 3);
printf("getXFromIndex Correct\n");
//test getYFromIndex
testGetYKernel <<<numberOfBlocks, threadsPerBlock>>> (xValues_device,
yValues_device, totalTests);
cudaMemcpy(yValues, yValues_device, xSize, cudaMemcpyDeviceToHost);
assert(yValues[0] == 0);
assert(yValues[1] == 0);
assert(yValues[2] == 1);
assert(yValues[3] == 0);
assert(yValues[4] == 1);
assert(yValues[5] == 2);
printf("getYFromIndex Correct.\n");
cudaFree(xValues_device);
cudaFree(yValues_device);
free(yValues);
free(xValues);
}
bool compareDouble(double a, double b, double limit)
{
if(fabs(a - b) / fabs(b) < limit)
{
return true;
}
else
{
return false;
}
}
void setupMakePeriodic()
{
srand(time(NULL));
int numberOfTests = 128;
double *box;
double *inputs_host;
double *inputs_device;
double *outputs_host;
double *dev_box;
size_t inputSize = sizeof(double) * numberOfTests;
box = (double *) malloc(sizeof(double));
*box = 10.0;
inputs_host = (double *) malloc(inputSize);
outputs_host = (double *) malloc(inputSize);
cudaMalloc((void **) &inputs_device, inputSize);
cudaMalloc((void **) &dev_box, sizeof(double));
//generate random numbers
for(int i = 0; i < numberOfTests; i++)
{
inputs_host[i] = ((double) (rand() % 100));
}
//copy data to device
cudaMemcpy(inputs_device, inputs_host, inputSize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_box, box, sizeof(double), cudaMemcpyHostToDevice);
int threadsPerBlock = numberOfTests / 2;
int blocks = numberOfTests / threadsPerBlock + (numberOfTests % threadsPerBlock == 0 ? 0 : 1);
testMakePeriodicKernel <<< blocks, threadsPerBlock >>> (inputs_device, dev_box, numberOfTests);
cudaMemcpy(outputs_host, inputs_device, inputSize, cudaMemcpyDeviceToHost);
//check that values are the same as known correct function
for(int i = 0; i < numberOfTests; i++)
{
double test_output = make_periodic(inputs_host[i], *box);
assert(outputs_host[i] == test_output);
}
printf("makePeriodic passed Tests\n");
free(inputs_host);
free(outputs_host);
cudaFree(inputs_device);
}
void testWrapBox()
{
srand(time(NULL));
int numberOfTests = 128;
double box;
double *testDoubles;
size_t inputSize = sizeof(double) * numberOfTests;
box = 10.f;
testDoubles = (double *) malloc(inputSize);
//generate random numbers
for(int i = 0; i < numberOfTests; i++)
{
testDoubles[i] = (double) rand() / (double) RAND_MAX;
}
//check that values are the same as known correct function
for(int i = 0; i < numberOfTests; i++)
{
double test_output = wrap_into_box(testDoubles[i], box);
assert(wrapBox(testDoubles[i], box) == test_output);
}
printf("wrapBox passed Tests\n");
free(testDoubles);
}
void setupCalc_lj()
{
double kryptonSigma = 3.624;
double kryptonEpsilon = 0.317;
int numberOfAtoms = 2;
Atom *atoms = new Atom[numberOfAtoms];
double *energy = (double *) malloc(sizeof(double));
*energy = 1000.f;
Atom *atoms_device;
Environment *enviro_device;
double *energy_device;
cudaMalloc((void **) &atoms_device, sizeof(Atom) * numberOfAtoms);
cudaMalloc((void **) &enviro_device, sizeof(Environment));
cudaMalloc((void **) &energy_device, sizeof(double));
Environment stableEnviro = createEnvironment(10, 10, 10, .5, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
generatePoints(atoms, enviro);
atoms[0].sigma = kryptonSigma;
atoms[0].epsilon = kryptonEpsilon;
atoms[1].sigma = kryptonSigma;
atoms[1].epsilon = kryptonEpsilon;
cudaMemcpy(atoms_device, atoms, sizeof(Atom) * numberOfAtoms, cudaMemcpyHostToDevice);
cudaMemcpy(enviro_device, enviro, sizeof(Environment), cudaMemcpyHostToDevice);
testCalcLJ<<<1,1>>>(atoms_device, enviro_device, energy_device);
cudaMemcpy(energy, energy_device, sizeof(double), cudaMemcpyDeviceToHost);
double baseEnergy = calculate_energy(atoms, enviro);
assert((int)(*energy * pow(10.f, 6.f)) == (int)( baseEnergy * pow(10.f,6.f)));
printf("\nparallelEnergy = %2.10f\nlinearEnergy = %2.10f\n", *energy, baseEnergy);
printf("Calc_lj is correct\n");
free(atoms);
free(energy);
cudaFree(atoms_device);
cudaFree(enviro_device);
cudaFree(energy_device);
}
void testGeneratePoints()
{
//init atoms, environment
int numberOfAtoms = 1000;
Atom *atoms = (Atom *) malloc(numberOfAtoms * sizeof(Atom));
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, 0, 0, 0);
}
Environment enviro = createEnvironment(10.0, 20.0, 35.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
generatePoints(atoms, &enviro);
//assert that all atoms positions are in range of the box
for (int i = 0; i < numberOfAtoms; i++)
{
double dim_x = atoms[i].x;
double dim_y = atoms[i].y;
double dim_z = atoms[i].z;
assert(dim_x >= 0 && dim_x <= (enviro.x) &&
dim_y >= 0 && dim_y <= (enviro.y) &&
dim_z >= 0 && dim_z <= (enviro.z));
}
printf("testGeneratePoints (atoms) successful.\n");
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, i % 2, i % 3, i % 4);
}
int numberOfMolecules = 250;
Molecule *molecules = (Molecule *)malloc(sizeof(Molecule)*numberOfMolecules);
for (int i = 0; i < numberOfMolecules; i++)
{
Bond *blankBonds = NULL;
Angle *blankAngles = NULL;
Dihedral *blankDihedrals = NULL;
int atomCount = numberOfAtoms / numberOfMolecules;
Atom *molAtoms = (Atom *) malloc(sizeof(Atom)*atomCount);
for (int j = 0; j < atomCount; j++)
{
molAtoms[j] = atoms[i*atomCount + j];
}
molecules[i] = createMolecule(-1, molAtoms, blankAngles, blankBonds, blankDihedrals, atomCount, 0, 0, 0);
}
generatePoints(molecules, &enviro);
for (int i = 0; i < numberOfMolecules; i++)
{
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
double dim_x = molecules[i].atoms[j].x;
double dim_y = molecules[i].atoms[j].y;
double dim_z = molecules[i].atoms[j].z;
assert(dim_x >= 0 && dim_x <= (enviro.x) &&
dim_y >= 0 && dim_y <= (enviro.y) &&
dim_z >= 0 && dim_z <= (enviro.z));
}
}
printf("testGeneratePoints (molecules) successful.\n");
free(atoms);
free(molecules);
}
void testCalcEnergy()
{
// the sigma value of nitrogen
double nSigma = 3.250;
// the epsilon value of nitrogen
double nEpsilon = 0.17;
// charge of nitrogen
double nCharge = -0.850;
struct timeval le_tvBegin, le_tvEnd, pl_tvBegin, pl_tvEnd;
//Generate environment and atoms
int numberOfAtoms = 1000;
Environment stableEnviro = createEnvironment(5.0, 10.0, 15.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
Atom *atoms = new Atom[numberOfAtoms];
for (int i = 0; i < numberOfAtoms; i++)
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, nSigma, nEpsilon);
atoms[i].charge = nCharge;
}
generatePoints(atoms, enviro);
//calculate energy linearly
gettimeofday(&le_tvBegin,NULL); //start clock for execution time
double te_linear = calculate_energy(atoms, enviro);
gettimeofday(&le_tvEnd,NULL); //stop clock for execution time
long le_runTime = timevaldiff(&le_tvBegin,&le_tvEnd); //get difference in time in milli seconds
//calculate energy in parallel
gettimeofday(&pl_tvBegin,NULL); //start clock for execution time
double te_parallel = calcEnergyWrapper(atoms, enviro);
gettimeofday(&pl_tvEnd,NULL); //stop clock for execution time
long pl_runTime = timevaldiff(&pl_tvBegin,&pl_tvEnd); //get difference in time in milli seconds
//Print out Results
printf("Number of elements: %d\n", numberOfAtoms);
printf("Linear Total Energy: %f \n", te_linear);
printf("In %lu ms\n", le_runTime);
printf("Parallel Total Energy: %f \n", te_parallel);
printf("In %lu ms\n", pl_runTime);
assert(compareDouble(te_linear, te_parallel, .05));
printf("testCalcEnergy successful.\n");
}
void testCalcEnergyWithMolecules()
{
// the sigma value of nitrogen
double nSigma = 3.250;
// the epsilon value of nitrogen
double nEpsilon = 0.17;
// charge of nitrogen
double nCharge = -0.850;
// the sigma value of krypton used in the LJ simulation
double kryptonSigma = 3.624;
// the epsilon value of krypton used in the LJ simulation
double kryptonEpsilon = 0.317;
// charge of krypton
double kryptonCharge = 0.0;
struct timeval le_tvBegin, le_tvEnd, pl_tvBegin, pl_tvEnd;
//Generate environment and atoms
int numberOfAtoms = 500;
Environment stableEnviro = createEnvironment(5.0, 10.0, 15.0, 1.0, 298.15, numberOfAtoms, 9.0, 15.0);
Environment *enviro = &stableEnviro;
Atom *atoms = new Atom[numberOfAtoms];
for (int i = 0; i < numberOfAtoms; i++)
{
if ((i % 5) < 3)
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, kryptonSigma, kryptonEpsilon);
atoms[i].charge = kryptonCharge;
}
else
{
atoms[i] = createAtom(i, 0.0, 0.0, 0.0, nSigma, nEpsilon);
atoms[i].charge = nCharge;
}
}
enviro->numOfMolecules = 100;
Molecule *molecules;
molecules = (Molecule *)malloc(sizeof(Molecule) * numberOfAtoms);
int atomCount = 0;
for(int i = 0; i < enviro->numOfMolecules; i++)
{
molecules[i].numOfAtoms = 5;
molecules[i].atoms = (Atom *)malloc(sizeof(Atom) * 5);
molecules[i].id = atomCount;
Hop *hops = (Hop *)malloc(sizeof(Hop) * 2);
hops[0] = createHop(atomCount, atomCount+3, 3);
hops[1] = createHop(atomCount, atomCount+4, 4);
molecules[i].hops = hops;
molecules[i].numOfHops = 2;
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
molecules[i].atoms[j] = atoms[atomCount];
atomCount++;
}
}
generatePoints(molecules, enviro);
for (int i = 0; i < enviro->numOfMolecules; i++)
{
for (int j = 0; j < molecules[i].numOfAtoms; j++)
{
atoms[i * 5 + j] = molecules[i].atoms[j];
}
}
//calculate energy linearly
gettimeofday(&le_tvBegin,NULL); //start clock for execution time
double te_linear = calculate_energy(atoms, enviro, molecules);
gettimeofday(&le_tvEnd,NULL); //stop clock for execution time
long le_runTime = timevaldiff(&le_tvBegin,&le_tvEnd); //get difference in time in milli seconds
//calculate energy in parallel
gettimeofday(&pl_tvBegin,NULL); //start clock for execution time
double te_parallel = calcEnergyWrapper(molecules, enviro);
gettimeofday(&pl_tvEnd,NULL); //stop clock for execution time
long pl_runTime = timevaldiff(&pl_tvBegin,&pl_tvEnd); //get difference in time in milli seconds
//Print out Results
printf("Number of elements: %d\n", numberOfAtoms);
printf("Linear Total Energy: %f \n", te_linear);
printf("In %lu ms\n", le_runTime);
printf("Parallel Total Energy: %f \n", te_parallel);
printf("In %lu ms\n", pl_runTime);
assert(compareDouble(te_linear, te_parallel, .05));
printf("testCalcEnergyWithMolecules successful.\n");
}
void testGetMoleculeFromIDWrapper()
{
int numberOfAtoms = 11;
int numberOfMolecules = 3;
Atom *atoms;
DeviceMolecule *molecules;
Environment enviro;
int *answers;
Atom *atoms_device;
DeviceMolecule *molecules_device;
int *answers_device;
enviro.numOfAtoms = numberOfAtoms;
enviro.numOfMolecules = numberOfMolecules;
atoms = (Atom *)malloc(sizeof(Atom) * numberOfAtoms);
molecules = (DeviceMolecule *)malloc(sizeof(DeviceMolecule) *numberOfMolecules);
answers = (int *)malloc(sizeof(int) * numberOfAtoms);
cudaMalloc((void **) &atoms_device, sizeof(Atom) * numberOfAtoms);
cudaMalloc((void **) &molecules_device, sizeof(DeviceMolecule) * numberOfMolecules);
cudaMalloc((void **) &answers_device, sizeof(int) * numberOfAtoms);
enviro.numOfAtoms = numberOfAtoms;
enviro.numOfMolecules = numberOfMolecules;
for(int i = 0; i < numberOfAtoms; i++)
{
atoms[i].id = i;
}
molecules[0].id = 0;
molecules[1].id = 2;
molecules[2].id = 6;
cudaMemcpy(atoms_device, atoms, sizeof(Atom) * numberOfAtoms, cudaMemcpyHostToDevice);
cudaMemcpy(molecules_device, molecules, sizeof(DeviceMolecule) * numberOfMolecules, cudaMemcpyHostToDevice);
int numberOfBlocks = 1;
int threadsPerBlock = 128;
testGetMoleculeFromID<<<numberOfBlocks,threadsPerBlock>>>(atoms_device, molecules_device, enviro, numberOfAtoms, answers_device);
cudaMemcpy(answers, answers_device, sizeof(int) * numberOfAtoms, cudaMemcpyDeviceToHost);
assert(answers[0] == 0);
assert(answers[1] == 0);
assert(answers[2] == 2);
assert(answers[3] == 2);
assert(answers[4] == 2);
assert(answers[5] == 2);
assert(answers[6] == 6);
assert(answers[7] == 6);
assert(answers[8] == 6);
assert(answers[9] == 6);
assert(answers[10] == 6);
printf("getMoleculeFromID passed tests\n");
free(atoms);
free(molecules);
free(answers);
cudaFree(atoms_device);
cudaFree(molecules_device);
cudaFree(answers_device);
}
void testCalcBlendingWrapper()
{
double *d1, *d2, *d1_device, *d2_device, *answers, *answers_device;
int numberOfTests = 5;
size_t doubleSize = sizeof(double) * numberOfTests;
d1 = (double *)malloc(doubleSize);
d2 = (double *)malloc(doubleSize);
answers = (double *)malloc(doubleSize);
cudaMalloc((void **) &d1_device, doubleSize);
cudaMalloc((void **) &d2_device, doubleSize);
cudaMalloc((void **) &answers_device, doubleSize);
d1[0] = 0.f;
d2[0] = 0.f;
d1[1] = 4.5;
d2[1] = 2.32;
d1[2] = 52.34;
d2[2] = 5.f;
d1[3] = 1.f;
d2[3] = 7.f;
d1[4] = 34.56;
d2[4] = 12.7;
cudaMemcpy(d1_device, d1, doubleSize, cudaMemcpyHostToDevice);
cudaMemcpy(d2_device, d2, doubleSize, cudaMemcpyHostToDevice);
int blocks = 1;
int threadsPerBlock = 64;
testCalcBlending <<<blocks, threadsPerBlock>>>(d1_device, d2_device, answers_device, numberOfTests);
cudaMemcpy(answers, answers_device, doubleSize, cudaMemcpyDeviceToHost);
for(int i = 0 ; i < numberOfTests; i++)
{
double expected = sqrt(d1[i] * d2[i]);
assert(fabs(answers[i] - expected) / expected < 0.01 || answers[i] == expected);
}
printf("calcBlending passed tests.\n");
free(d1);
free(d2);
free(answers);
cudaFree(d1_device);
cudaFree(d2_device);
cudaFree(answers_device);
}
void testGetFValueWrapper()
{
Environment *enviro, *dev_enviro;
DeviceMolecule *molecules, *dev_molecules;
Atom *mol1_atoms, *mol2_atoms, *atom1List, *atom2List, *dev_atom1List, *dev_atom2List;
Hop *dev_hops;
double *fvalues, *dev_fvalues;
int numberOfTests = 5;
Environment stable_enviro = createEnvironment(5.0,5.0,5.0,1.0,270.0,5,9.0,15.0);
enviro = &stable_enviro;
mol1_atoms = (Atom *)malloc(sizeof(Atom)*5);
mol2_atoms = (Atom *)malloc(sizeof(Atom));
atom1List = (Atom *)malloc(sizeof(Atom)*numberOfTests);
atom2List = (Atom *)malloc(sizeof(Atom)*numberOfTests);
fvalues = (double *)malloc(sizeof(double)*numberOfTests);
for (int i = 0; i < 5; i++)
{
mol1_atoms[i] = createAtom(i,1.0,1.0,1.0);
}
for (int i = 0; i < 5; i++)
{
atom1List[i] = mol1_atoms[0];
if (i < 4)
atom2List[i] = mol1_atoms[i+1];
}
mol2_atoms[0] = createAtom(5,1.0,1.0,1.0);
atom2List[4] = mol2_atoms[0];
Hop* hops = (Hop *)malloc(sizeof(Hop)*2);
hops[0] = createHop(0,3,3);
hops[1] = createHop(0,4,4);
molecules = (DeviceMolecule *)malloc(sizeof(DeviceMolecule)*2);
molecules[0] = createDeviceMolecule(0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 2);
molecules[1] = createDeviceMolecule(5, 5, 1, 0, 0, 0, 0, 0, 0, 2, 0);
cudaMalloc((void **) &dev_enviro, sizeof(Environment));
cudaMalloc((void **) &dev_molecules, sizeof(DeviceMolecule)*2);
cudaMalloc((void **) &dev_atom1List, sizeof(Atom)*numberOfTests);
cudaMalloc((void **) &dev_atom2List, sizeof(Atom)*numberOfTests);
cudaMalloc((void **) &dev_fvalues, sizeof(double)*numberOfTests);
cudaMalloc((void **) &dev_hops, sizeof(Hop)*2);
cudaMemcpy(dev_enviro, enviro, sizeof(Environment), cudaMemcpyHostToDevice);
cudaMemcpy(dev_molecules, molecules, sizeof(DeviceMolecule)*2, cudaMemcpyHostToDevice);
cudaMemcpy(dev_atom1List, atom1List, sizeof(Atom)*numberOfTests, cudaMemcpyHostToDevice);
cudaMemcpy(dev_atom2List, atom2List, sizeof(Atom)*numberOfTests, cudaMemcpyHostToDevice);
cudaMemcpy(dev_hops, hops, sizeof(Hop)*2, cudaMemcpyHostToDevice);
int blocks = 1;
int threadsPerBlock = 64;
testGetFValue <<<blocks, threadsPerBlock>>>(dev_atom1List, dev_atom2List, dev_molecules, dev_enviro, dev_fvalues, numberOfTests, dev_hops);
cudaMemcpy(fvalues, dev_fvalues, sizeof(double)*numberOfTests, cudaMemcpyDeviceToHost);
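// expected scale factors: 0.0 for intramolecular pairs fewer than 3 bonds apart, 0.5 for pairs exactly 3 hops apart (1-4 interactions), 1.0 for more distant pairs or atoms in different molecules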
double *expected = (double *)malloc(sizeof(double)*numberOfTests);
expected[0] = 0.0;
expected[1] = 0.0;
expected[2] = 0.5;
expected[3] = 1.0;
expected[4] = 1.0;
for(int i = 0 ; i < numberOfTests; i++)
{
assert(expected[i] == fvalues[i]);
}
printf("testGetFValue passed tests.\n");
free(mol1_atoms);
free(mol2_atoms);
free(atom1List);
free(atom2List);
free(fvalues);
free(molecules);
free(hops);
cudaFree(dev_enviro);
cudaFree(dev_molecules);
cudaFree(dev_atom1List);
cudaFree(dev_atom2List);
cudaFree(dev_fvalues);
cudaFree(dev_hops);
}
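// rotate toRotate about the pivot by the full allowed angle about each axis in turn, giving an upper bound on how far rotateMolecule may move an atom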
Atom findMaxRotation(Atom pivot, Atom toRotate, double rotation){
toRotate.x -= pivot.x;
toRotate.y -= pivot.y;
toRotate.z -= pivot.z;
rotateAboutX(toRotate, rotation);
rotateAboutY(toRotate, rotation);
rotateAboutZ(toRotate, rotation);
toRotate.x += pivot.x;
toRotate.y += pivot.y;
toRotate.z += pivot.z;
return toRotate;
}
void testRotateMolecule()
{
srand(time(NULL));
//Testing on a molecule that is not totally unlike water
double bondDistance = 0.9584; // angstroms
double maxRotation = 10.0; // degrees
int numOfAtoms = 3;
int numOfAngles = 1;
int numOfBonds = 2;
int numOfDihedrals = 0;
Atom oxygen = createAtom(1, 0, 0, 0);
Atom hydrogen1 = createAtom(2, 0, bondDistance, 0);
Atom hydrogen2 = createAtom(3, bondDistance, 0, 0);
Atom *atoms = (Atom *)malloc(sizeof(Atom) * 3);
atoms[0] = oxygen;
atoms[1] = hydrogen1;
atoms[2] = hydrogen2;
vector<Atom> atomVector;
atomVector.push_back(oxygen);
atomVector.push_back(hydrogen1);
atomVector.push_back(hydrogen2);
Bond b1 = createBond(1,2, bondDistance, false);
Bond b2 = createBond(1,3, bondDistance, false);
Bond *bonds = (Bond *)malloc(sizeof(Bond) * 2);
bonds[0] = b1;
bonds[1] = b2;
Angle a1 = createAngle(2,3,90,false);
Angle *angles = (Angle *)malloc(sizeof(Angle));
angles[0] = a1;
Dihedral *dihedrals = (Dihedral *)malloc(sizeof(Dihedral) * 0);
Molecule molec;
molec = createMolecule(1, atoms, angles, bonds, dihedrals,
numOfAtoms, numOfAngles, numOfBonds, numOfDihedrals);
int testNumber = 10;
printf("Testing rotateMolecule\n");
for(int i = 0 ; i < testNumber; i++)
{
//pick atom to rotate about. Cycle through all of them
Atom toRotate = atoms[1];
rotateMolecule(molec, toRotate, maxRotation);
//test that rotation is within limit
Atom newAtom1 = atoms[2];
Atom origAtom1 = getAtom(atomVector, newAtom1.id);
double angleChange1 = getAngle(newAtom1, toRotate, origAtom1);
printf("Atom1 angle change = %f\n", angleChange1);
Atom newAtom2 = atoms[0];
Atom origAtom2 = getAtom(atomVector, newAtom2.id);
double angleChange2 = getAngle(newAtom2, toRotate, origAtom2);
printf("Atom2 angle change = %f\n", angleChange2);
Atom maxAtom1 = findMaxRotation(toRotate, newAtom1, maxRotation);
Atom maxAtom2 = findMaxRotation(toRotate, newAtom2, maxRotation);
double maxAngle1 = getAngle(maxAtom1, toRotate, origAtom1);
double maxAngle2 = getAngle(maxAtom2, toRotate, origAtom2);
assert(angleChange1 <= maxAngle1);
assert(angleChange2 <= maxAngle2);
//reset atoms
molec.atoms[0] = oxygen;
molec.atoms[1] = hydrogen1;
molec.atoms[2] = hydrogen2;
}
printf("rotateMolecule passed tests.\n");
}
void testCalcChargeWrapper()
{
printf("Testing calcCharge()\n");
int numberOfTests = 10;
// data on the host
Atom *atoms1_h;
Atom *atoms2_h;
Environment *enviro_h;
double *answers_h;
// data on the device
Atom *atoms1_d;
Atom *atoms2_d;
Environment *enviro_d;
double *answers_d;
// get sizes of data
size_t atomSize = sizeof(Atom) * numberOfTests;
size_t enviroSize = sizeof(Environment);
size_t answerSize = sizeof(double) * numberOfTests;
// mallocate on host
atoms1_h = (Atom *)malloc(atomSize);
atoms2_h = (Atom *)malloc(atomSize);
enviro_h = (Environment *)malloc(enviroSize);
answers_h = (double *) malloc(answerSize);
// mallocate on device
cudaMalloc((void **) &atoms1_d, atomSize);
cudaMalloc((void **) &atoms2_d, atomSize);
cudaMalloc((void **) &enviro_d, enviroSize);
cudaMalloc((void **) &answers_d, answerSize);
double xSize = 10;
double ySize = xSize;
double zSize = ySize;
//generate atoms for test
srand(time(NULL));
for(int i = 0; i < numberOfTests; i++)
{
atoms1_h[i].x = (double) rand() / (double) RAND_MAX * xSize;
atoms2_h[i].x = (double) rand() / (double) RAND_MAX * xSize;
atoms1_h[i].y = (double) rand() / (double) RAND_MAX * ySize;
atoms2_h[i].y = (double) rand() / (double) RAND_MAX * ySize;
atoms1_h[i].z = (double) rand() / (double) RAND_MAX * zSize;
atoms2_h[i].z = (double) rand() / (double) RAND_MAX * zSize;
atoms1_h[i].charge = (double) rand() / (double) RAND_MAX * 2 - 1;
atoms2_h[i].charge = (double) rand() / (double) RAND_MAX * 2 - 1;
}
enviro_h->x = xSize;
enviro_h->y = ySize;
enviro_h->z = zSize;
enviro_h->numOfAtoms = numberOfTests;
//transfer data to the device
cudaMemcpy(atoms1_d, atoms1_h, atomSize, cudaMemcpyHostToDevice);
cudaMemcpy(atoms2_d, atoms2_h, atomSize, cudaMemcpyHostToDevice);
cudaMemcpy(enviro_d, enviro_h, enviroSize, cudaMemcpyHostToDevice);
//call test function
int numOfBlocks = 1;
int threadsPerBlock = 64;
testCalcCharge<<<numOfBlocks, threadsPerBlock>>>(atoms1_d, atoms2_d, answers_d, enviro_d);
//transfer answers from device to host
cudaMemcpy(answers_h, answers_d, answerSize, cudaMemcpyDeviceToHost);
//TEST ANSWERS
for(int i = 0; i < numberOfTests; i++)
{
double expected = calc_charge(atoms1_h[i], atoms2_h[i], *enviro_h);
assert((expected - answers_h[i]) / expected < .01);
}
printf("calcCharge passed tests.\n");
free(atoms1_h);
free(atoms2_h);
free(enviro_h);
free(answers_h);
cudaFree(atoms1_d);
cudaFree(atoms2_d);
cudaFree(enviro_d);
cudaFree(answers_d);
}
int main()
{
testCopyMolecules();
testKeepMoleculeInBox();
testRotateMolecule();
testCalcChargeWrapper();
testCalcBlendingWrapper();
testGetMoleculeFromIDWrapper();
testWrapBox();
setupCalc_lj();
setupGetIndexTest();
setupMakePeriodic();
testGeneratePoints();
testCalcEnergy();
testCalcEnergyWithMolecules();
return 0;
}
|
1fbd8e0b900631aaa5a9ee3f3005308708a60834.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Repeat <T = ?, Device = CUDA> */
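// x is laid out as (outer_dim, repeat_dim, inner_dim); y as (outer_dim, repeat_dim, repeats, inner_dim), i.e. each inner block of x is written repeats times consecutively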
template <typename T>
__global__ void _Repeat(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int iix = y_idx % x_inner_dim;
const int rix = (y_idx / y_inner_dim) % repeat_dim;
const int oix = y_idx / y_inner_dim / repeat_dim;
const int x_idx = (oix * repeat_dim + rix)
* x_inner_dim + iix;
y[y_idx] = x[x_idx];
}
}
/*! RepeatGrad <T = ?, Device = CUDA> */
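// each x element was copied to "repeats" positions in y, so its gradient is the sum over those copies, which lie x_inner_dim apart within dy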
template <typename T>
__global__ void _RepeatGrad(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const int repeats,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
const int iix = x_idx % x_inner_dim;
const int rix = (x_idx / x_inner_dim) % repeat_dim;
const int oix = x_idx / x_inner_dim / repeat_dim;
const T* dY = dy + ((oix * repeat_dim + rix)
* y_inner_dim + iix);
T gradient = 0;
for (int r = 0; r < repeats; ++r)
gradient += dY[r * x_inner_dim];
dx[x_idx] = gradient;
}
}
/*! RepeatGrad <T = float16, Device = CUDA> */
__global__ void _RepeatGradHalf(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const int repeats,
const half* dy,
half* dx) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
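// the fp16 body is only compiled for compute capability >= 5.3; on older architectures this kernel is a no-op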
#if __CUDA_ARCH__ >= 530
const int iix = x_idx % x_inner_dim;
const int rix = (x_idx / x_inner_dim) % repeat_dim;
const int oix = x_idx / x_inner_dim / repeat_dim;
const half* dY = dy + ((oix * repeat_dim + rix)
* y_inner_dim + iix);
float gradient = 0;
for (int r = 0; r < repeats; ++r)
gradient += __half2float(dY[r * x_inner_dim]);
dx[x_idx] = __float2half(gradient);
#endif
}
}
/*! Kernel Launchers */
#define DEFINE_REPEAT_KERNEL_LAUNCHER(T) \
template<> void Repeat<T, CUDAContext>( \
const int outer_dim, \
const int repeat_dim, \
const int inner_dim, \
const int repeats, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
auto y_inner_dim = inner_dim * repeats; \
auto nthreads = outer_dim * repeat_dim * inner_dim * repeats; \
_Repeat<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, repeat_dim, inner_dim, \
y_inner_dim, x, y); \
}
#define DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(T) \
template<> void RepeatGrad<T, CUDAContext>( \
const int outer_dim, \
const int repeat_dim, \
const int inner_dim, \
const int repeats, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
auto y_inner_dim = inner_dim * repeats; \
auto nthreads = outer_dim * repeat_dim * inner_dim; \
_RepeatGrad<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, repeat_dim, inner_dim, \
y_inner_dim, repeats, dy, dx); \
}
DEFINE_REPEAT_KERNEL_LAUNCHER(bool);
DEFINE_REPEAT_KERNEL_LAUNCHER(int8_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(uint8_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(int);
DEFINE_REPEAT_KERNEL_LAUNCHER(int64_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(float16);
DEFINE_REPEAT_KERNEL_LAUNCHER(float);
DEFINE_REPEAT_KERNEL_LAUNCHER(double);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int8_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(uint8_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int64_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(float);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(double);
template<> void RepeatGrad<float16, CUDAContext>(
const int outer_dim,
const int repeat_dim,
const int inner_dim,
const int repeats,
const float16* dy,
float16* dx,
CUDAContext* ctx) {
auto y_inner_dim = inner_dim * repeats;
auto nthreads = outer_dim * repeat_dim * inner_dim;
_RepeatGradHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, repeat_dim, inner_dim,
y_inner_dim, repeats,
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
#undef DEFINE_REPEAT_KERNEL_LAUNCHER
#undef DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | 1fbd8e0b900631aaa5a9ee3f3005308708a60834.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Repeat <T = ?, Device = CUDA> */
template <typename T>
__global__ void _Repeat(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(y_idx, nthreads) {
const int iix = y_idx % x_inner_dim;
const int rix = (y_idx / y_inner_dim) % repeat_dim;
const int oix = y_idx / y_inner_dim / repeat_dim;
const int x_idx = (oix * repeat_dim + rix)
* x_inner_dim + iix;
y[y_idx] = x[x_idx];
}
}
/*! RepeatGrad <T = ?, Device = CUDA> */
template <typename T>
__global__ void _RepeatGrad(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const int repeats,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
const int iix = x_idx % x_inner_dim;
const int rix = (x_idx / x_inner_dim) % repeat_dim;
const int oix = x_idx / x_inner_dim / repeat_dim;
const T* dY = dy + ((oix * repeat_dim + rix)
* y_inner_dim + iix);
T gradient = 0;
for (int r = 0; r < repeats; ++r)
gradient += dY[r * x_inner_dim];
dx[x_idx] = gradient;
}
}
/*! RepeatGrad <T = float16, Device = CUDA> */
__global__ void _RepeatGradHalf(
const int nthreads,
const int repeat_dim,
const int x_inner_dim,
const int y_inner_dim,
const int repeats,
const half* dy,
half* dx) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
#if __CUDA_ARCH__ >= 530
const int iix = x_idx % x_inner_dim;
const int rix = (x_idx / x_inner_dim) % repeat_dim;
const int oix = x_idx / x_inner_dim / repeat_dim;
const half* dY = dy + ((oix * repeat_dim + rix)
* y_inner_dim + iix);
float gradient = 0;
for (int r = 0; r < repeats; ++r)
gradient += __half2float(dY[r * x_inner_dim]);
dx[x_idx] = __float2half(gradient);
#endif
}
}
/*! Kernel Launchers */
#define DEFINE_REPEAT_KERNEL_LAUNCHER(T) \
template<> void Repeat<T, CUDAContext>( \
const int outer_dim, \
const int repeat_dim, \
const int inner_dim, \
const int repeats, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
auto y_inner_dim = inner_dim * repeats; \
auto nthreads = outer_dim * repeat_dim * inner_dim * repeats; \
_Repeat<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, repeat_dim, inner_dim, \
y_inner_dim, x, y); \
}
#define DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(T) \
template<> void RepeatGrad<T, CUDAContext>( \
const int outer_dim, \
const int repeat_dim, \
const int inner_dim, \
const int repeats, \
const T* dy, \
T* dx, \
CUDAContext* ctx) { \
auto y_inner_dim = inner_dim * repeats; \
auto nthreads = outer_dim * repeat_dim * inner_dim; \
_RepeatGrad<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, repeat_dim, inner_dim, \
y_inner_dim, repeats, dy, dx); \
}
DEFINE_REPEAT_KERNEL_LAUNCHER(bool);
DEFINE_REPEAT_KERNEL_LAUNCHER(int8_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(uint8_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(int);
DEFINE_REPEAT_KERNEL_LAUNCHER(int64_t);
DEFINE_REPEAT_KERNEL_LAUNCHER(float16);
DEFINE_REPEAT_KERNEL_LAUNCHER(float);
DEFINE_REPEAT_KERNEL_LAUNCHER(double);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int8_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(uint8_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(int64_t);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(float);
DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER(double);
template<> void RepeatGrad<float16, CUDAContext>(
const int outer_dim,
const int repeat_dim,
const int inner_dim,
const int repeats,
const float16* dy,
float16* dx,
CUDAContext* ctx) {
auto y_inner_dim = inner_dim * repeats;
auto nthreads = outer_dim * repeat_dim * inner_dim;
_RepeatGradHalf
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(nthreads, repeat_dim, inner_dim,
y_inner_dim, repeats,
reinterpret_cast<const half*>(dy),
reinterpret_cast<half*>(dx));
}
#undef DEFINE_REPEAT_KERNEL_LAUNCHER
#undef DEFINE_REPEAT_GRAD_KERNEL_LAUNCHER
} // namespace kernel
} // namepsace dragon
#endif // WITH_CUDA |
e2e0d5db4ebb5ffd735eb3a49deb035aa9bb295e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
const long N = 1000000; // Change array size (may need a long)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// HELPER CODE TO INITIALIZE, PRINT AND TIME
struct timeval start, end;
void starttime() {
gettimeofday( &start, 0 );
}
void endtime(const char* c) {
gettimeofday( &end, 0 );
double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0;
printf("%s: %f ms\n", c, elapsed);
}
void init(const char* c) {
printf("***************** %s **********************\n", c);
// TMC Commenting Out for Class
printf("Running %s...\n", c);
starttime();
}
void finish(int a, long N, const char* c) {
endtime(c);
printf("Done.\n");
printf("\nThere are %d prime numbers between 1 and %ld.\n", a, N);
printf("***************************************************\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
__global__ void prime(long* a, long high) {
// Prime algorithm
bool check = false;
for(int i = 2; i <= high/2; ++i) {
if(high % i == 0) {
check = true;
break;
}
}
if(check)
++a;
}
*/
// Normal C function to count primes by trial division
int normal(int a, long N)
{
long low = 2, high = N, i, check;
// printf("Prime numbers between 1 and %d are: ",high);
while (low < high)
{
check = 0;
for(i = 2; i <= low/2; ++i)
{
if(low % i == 0)
{
check = 1;
break;
}
}
if (check == 0)
++a;
//printf("%d ", low);
++low;
}
return a;
}
// GPU function to count primes by trial division
// Every thread on every core runs this function
__global__ void gpu_prime(int* a, long N) {
// One element per thread on each core
// blockIdx.x = Core #
// blockDim.x = Threads per core
// threadIdx.x = Thread #
// The formula below makes sure the value of element
// is different on every thread on every core
long element = blockIdx.x*blockDim.x + threadIdx.x;
// If there is not an even split, some threads will be
// out of bounds
// We just let those do nothing
// The rest test their elements for primality
if (element <= N && element >= 2) {
/*
if (element % 2 != 0)
element = N - element;
//printf("%d\n", element);
*/
//printf("%d\n", element);
int check = 0;
for(int i = 2; i <= element/2; ++i) {
if(element % i == 0) {
check = 1;
break;
}
}
if (check == 0){
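// check == 0 means no divisor was found, so `element` is prime; atomicAdd serialises
// the concurrent increments from all threads into the single counter behind `a`.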
atomicAdd(a,1);
}
}
}
void gpu(int* a, long N) {
int threadsPerCore = 512; // This can vary, up to 1024
long numCores = N / threadsPerCore + 1; // This division will work. If the split is uneven, we overshoot
// Budget memory for counter
// Memory must be on the graphics card (use hipMalloc for this)
int* gpuA;
hipMalloc(&gpuA, sizeof(int)); // Allocate enough memory on the GPU
// Copy the counter a from CPU memory to gpuA on the graphics card
// Note: This operation is SLOW. You will have to offset this cost with the parallelism below
hipMemcpy(gpuA, a, sizeof(int), hipMemcpyHostToDevice);
//printf("%ld\n", *gpuA);
// Call parallel function with specified number of cores and threads per core
hipLaunchKernelGGL(( gpu_prime), dim3(numCores), dim3(threadsPerCore), 0, 0, gpuA, N);
// Copy the prime count gpuA from the graphics card back to a in CPU memory
// Again, this operation is SLOW.
hipMemcpy(a, gpuA, sizeof(int), hipMemcpyDeviceToHost);
// Release the memory for gpuA
hipFree(gpuA); // Free the memory on the GPU
}
int main()
{
/////////////////////////////////////////////////////////////////////////
// GPUs will likely have large N
// Budget memory on the heap, prevent a stack overflow
int a = 1;
/////////////////////////////////////////////////////////////////////////
// Test 1: Sequential For Loop
init ("Normal");
a = normal(a, N);
finish(a, N, "Normal");
// Test 2: GPU
a = 1;
init("GPU");
gpu(&a, N);
finish(a, N, "GPU");
// Memory on the heap must be freed manually
//free(&a);
return 0;
}
| e2e0d5db4ebb5ffd735eb3a49deb035aa9bb295e.cu | #include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>
const long N = 1000000; // Change array size (may need a long)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// HELPER CODE TO INITIALIZE, PRINT AND TIME
struct timeval start, end;
void starttime() {
gettimeofday( &start, 0 );
}
void endtime(const char* c) {
gettimeofday( &end, 0 );
double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0;
printf("%s: %f ms\n", c, elapsed);
}
void init(const char* c) {
printf("***************** %s **********************\n", c);
// TMC Commenting Out for Class
printf("Running %s...\n", c);
starttime();
}
void finish(int a, long N, const char* c) {
endtime(c);
printf("Done.\n");
printf("\nThere are %d prime numbers between 1 and %ld.\n", a, N);
printf("***************************************************\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
__global__ void prime(long* a, long high) {
// Prime algorithm
bool check = false;
for(int i = 2; i <= high/2; ++i) {
if(high % i == 0) {
check = true;
break;
}
}
if(check)
++a;
}
*/
// Normal C function to count primes by trial division
int normal(int a, long N)
{
long low = 2, high = N, i, check;
// printf("Prime numbers between 1 and %d are: ",high);
while (low < high)
{
check = 0;
for(i = 2; i <= low/2; ++i)
{
if(low % i == 0)
{
check = 1;
break;
}
}
if (check == 0)
++a;
//printf("%d ", low);
++low;
}
return a;
}
// GPU function to count primes by trial division
// Every thread on every core runs this function
__global__ void gpu_prime(int* a, long N) {
// One element per thread on each core
// blockIdx.x = Core #
// blockDim.x = Threads per core
// threadIdx.x = Thread #
// The formula below makes sure the value of element
// is different on every thread on every core
long element = blockIdx.x*blockDim.x + threadIdx.x;
// If there is not an even split, some threads will be
// out of bounds
// We just let those do nothing
// The rest test their elements for primality
if (element <= N && element >= 2) {
/*
if (element % 2 != 0)
element = N - element;
//printf("%d\n", element);
*/
//printf("%d\n", element);
int check = 0;
for(int i = 2; i <= element/2; ++i) {
if(element % i == 0) {
check = 1;
break;
}
}
if (check == 0){
atomicAdd(a,1);
}
}
}
void gpu(int* a, long N) {
int threadsPerCore = 512; // This can vary, up to 1024
long numCores = N / threadsPerCore + 1; // This division will work. If the split is uneven, we overshoot
// Budget memory for counter
// Memory must be on the graphics card (use cudaMalloc for this)
int* gpuA;
cudaMalloc(&gpuA, sizeof(int)); // Allocate enough memory on the GPU
// Copy the counter a from CPU memory to gpuA on the graphics card
// Note: This operation is SLOW. You will have to offset this cost with the parallelism below
cudaMemcpy(gpuA, a, sizeof(int), cudaMemcpyHostToDevice);
//printf("%ld\n", *gpuA);
// Call parallel function with specified number of cores and threads per core
gpu_prime<<<numCores, threadsPerCore>>>(gpuA, N);
// Copy the prime count gpuA from the graphics card back to a in CPU memory
// Again, this operation is SLOW.
cudaMemcpy(a, gpuA, sizeof(int), cudaMemcpyDeviceToHost);
// Release the memory for gpuA
cudaFree(gpuA); // Free the memory on the GPU
}
int main()
{
/////////////////////////////////////////////////////////////////////////
// GPUs will likely have large N
// Budget memory on the heap, prevent a stack overflow
int a = 1;
/////////////////////////////////////////////////////////////////////////
// Test 1: Sequential For Loop
init ("Normal");
a = normal(a, N);
finish(a, N, "Normal");
// Test 2: GPU
a = 1;
init("GPU");
gpu(&a, N);
finish(a, N, "GPU");
// Memory on the heap must be freed manually
//free(&a);
return 0;
}
|
2104b61f21d57781dc1ea35f0268b270a0484d39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#ifndef __HIPCC__
#define __HIPCC__
#endif
#define n 10
#define W 100
hipError_t knapsackCuda(int *output, const int *val, const int *wt, unsigned int size);
__device__ int maxi(int a, int b) {
return (a > b)? a : b;
}
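/* The kernel below fills one row of the classic 0/1 knapsack DP table per launch,
   one thread per capacity w, using the standard recurrence
     dp[i][w] = dp[i-1][w]                                          if wt[i-1] > w
     dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]])    otherwise,
   with the table flattened row-major into `output` using row stride W. */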
__global__ void knapsackKernel(int *wt, int *val, int *output, int i) {
int w = threadIdx.x;
//__syncthreads();
if (i == 0 || w == 0)
output[(i*W)+w] = 0;
else if (wt[i-1] <= w)
output[(i*W)+w] = maxi(val[i-1] + output[((i-1)*W)+(w-wt[i-1])], output[((i-1)*W)+w]);
else
output[(i*W)+w] = output[((i-1)*W)+w];
__syncthreads();
} | 2104b61f21d57781dc1ea35f0268b270a0484d39.cu | #include "includes.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
#define n 10
#define W 100
cudaError_t knapsackCuda(int *output, const int *val, const int *wt, unsigned int size);
__device__ int maxi(int a, int b) {
return (a > b)? a : b;
}
__global__ void knapsackKernel(int *wt, int *val, int *output, int i) {
int w = threadIdx.x;
//__syncthreads();
if (i == 0 || w == 0)
output[(i*W)+w] = 0;
else if (wt[i-1] <= w)
output[(i*W)+w] = maxi(val[i-1] + output[((i-1)*W)+(w-wt[i-1])], output[((i-1)*W)+w]);
else
output[(i*W)+w] = output[((i-1)*W)+w];
__syncthreads();
} |
16600daf1eb48bab104d781190d0bb94b40a5af4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip | FileCheck -check-prefixes=CHECK,O0 %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -fcuda-is-device | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: | FileCheck -check-prefixes=CHECK,O0 %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O3 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O3 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s
#include "Inputs/cuda.h"
extern "C" __global__ void ckernel(int *a) {
*a = 1;
}
// Kernel symbol for launching kernel.
// CHECK: @[[SYM:ckernel]] = constant void (i32*)* @__device_stub__ckernel, align 8
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// DEV: define {{.*}}@ckernel{{.*}}!dbg
// DEV: store {{.*}}!dbg
// DEV: ret {{.*}}!dbg
// Make sure there is no !dbg between function attributes and '{'
// CHECK: define{{.*}} void @[[CSTUB:__device_stub__ckernel]]{{.*}} #{{[0-9]+}} {
// CHECK-NOT: call {{.*}}@hipLaunchByPtr{{.*}}!dbg
// CHECK: call {{.*}}@hipLaunchByPtr{{.*}}@[[SYM]]
// CHECK-NOT: ret {{.*}}!dbg
// CHECK-LABEL: define {{.*}}@_Z8hostfuncPi{{.*}}!dbg
// O0: call void @[[CSTUB]]{{.*}}!dbg
void hostfunc(int *a) {
hipLaunchKernelGGL(( ckernel), dim3(1), dim3(1), 0, 0, a);
}
| 16600daf1eb48bab104d781190d0bb94b40a5af4.cu | // RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip | FileCheck -check-prefixes=CHECK,O0 %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -fcuda-is-device | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: | FileCheck -check-prefixes=CHECK,O0 %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O3 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 | FileCheck %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O3 \
// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \
// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \
// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s
#include "Inputs/cuda.h"
extern "C" __global__ void ckernel(int *a) {
*a = 1;
}
// Kernel symbol for launching kernel.
// CHECK: @[[SYM:ckernel]] = constant void (i32*)* @__device_stub__ckernel, align 8
// Device side kernel names
// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00"
// DEV: define {{.*}}@ckernel{{.*}}!dbg
// DEV: store {{.*}}!dbg
// DEV: ret {{.*}}!dbg
// Make sure there is no !dbg between function attributes and '{'
// CHECK: define{{.*}} void @[[CSTUB:__device_stub__ckernel]]{{.*}} #{{[0-9]+}} {
// CHECK-NOT: call {{.*}}@hipLaunchByPtr{{.*}}!dbg
// CHECK: call {{.*}}@hipLaunchByPtr{{.*}}@[[SYM]]
// CHECK-NOT: ret {{.*}}!dbg
// CHECK-LABEL: define {{.*}}@_Z8hostfuncPi{{.*}}!dbg
// O0: call void @[[CSTUB]]{{.*}}!dbg
void hostfunc(int *a) {
ckernel<<<1, 1>>>(a);
}
|
22876b1f1b1ec72cd40c1b4837797b79b4282084.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "read_json.hpp"
#include <io/comp/io_uncomp.hpp>
#include <io/json/legacy/read_json.hpp>
#include <io/json/nested_json.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scatter.h>
#include <numeric>
namespace cudf::io::json::detail {
size_t sources_size(host_span<std::unique_ptr<datasource>> const sources,
size_t range_offset,
size_t range_size)
{
return std::accumulate(sources.begin(), sources.end(), 0ul, [=](size_t sum, auto& source) {
auto const size = source->size();
// TODO take care of 0, 0, or *, 0 case.
return sum +
(range_size == 0 or range_offset + range_size > size ? size - range_offset : range_size);
});
}
rmm::device_uvector<char> ingest_raw_input(host_span<std::unique_ptr<datasource>> sources,
compression_type compression,
size_t range_offset,
size_t range_size,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
// We append a line delimiter between two files to make sure the last line of file i and the first
// line of file i+1 don't end up on the same JSON line, if file i does not already end with a line
// delimiter.
auto constexpr num_delimiter_chars = 1;
auto const num_extra_delimiters = num_delimiter_chars * (sources.size() - 1);
// Iterate through the user defined sources and read the contents into the local buffer
auto const total_source_size =
sources_size(sources, range_offset, range_size) + num_extra_delimiters;
if (compression == compression_type::NONE) {
std::vector<size_type> delimiter_map{};
delimiter_map.reserve(sources.size());
auto d_buffer = rmm::device_uvector<char>(total_source_size, stream);
size_t bytes_read = 0;
std::vector<std::unique_ptr<datasource::buffer>> h_buffers;
for (auto const& source : sources) {
if (!source->is_empty()) {
auto data_size = (range_size != 0) ? range_size : source->size();
auto destination = reinterpret_cast<uint8_t*>(d_buffer.data()) + bytes_read;
if (source->is_device_read_preferred(data_size)) {
bytes_read += source->device_read(range_offset, data_size, destination, stream);
} else {
h_buffers.emplace_back(source->host_read(range_offset, data_size));
auto const& h_buffer = h_buffers.back();
CUDF_CUDA_TRY(hipMemcpyAsync(
destination, h_buffer->data(), h_buffer->size(), hipMemcpyDefault, stream.value()));
bytes_read += h_buffer->size();
}
delimiter_map.push_back(bytes_read);
bytes_read += num_delimiter_chars;
}
}
// If this is a multi-file source, we scatter the JSON line delimiters between files
if (sources.size() > 1) {
static_assert(num_delimiter_chars == 1,
"Currently only single-character delimiters are supported");
auto const delimiter_source = thrust::make_constant_iterator('\n');
auto const d_delimiter_map = cudf::detail::make_device_uvector_async(
host_span<size_type const>{delimiter_map.data(), delimiter_map.size() - 1},
stream,
rmm::mr::get_current_device_resource());
thrust::scatter(rmm::exec_policy_nosync(stream),
delimiter_source,
delimiter_source + d_delimiter_map.size(),
d_delimiter_map.data(),
d_buffer.data());
}
stream.synchronize();
return d_buffer;
} else {
auto buffer = std::vector<uint8_t>(total_source_size);
// Single read because only a single compressed source is supported
// Reading to host because decompression of a single block is much faster on the CPU
sources[0]->host_read(range_offset, total_source_size, buffer.data());
auto const uncomp_data = decompress(compression, buffer);
return cudf::detail::make_device_uvector_sync(
host_span<char const>{reinterpret_cast<char const*>(uncomp_data.data()), uncomp_data.size()},
stream,
rmm::mr::get_current_device_resource());
}
}
size_type find_first_delimiter_in_chunk(host_span<std::unique_ptr<cudf::io::datasource>> sources,
json_reader_options const& reader_opts,
char const delimiter,
rmm::cuda_stream_view stream)
{
auto const buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
reader_opts.get_byte_range_offset(),
reader_opts.get_byte_range_size(),
stream);
return find_first_delimiter(buffer, delimiter, stream);
}
bool should_load_whole_source(json_reader_options const& reader_opts)
{
return reader_opts.get_byte_range_offset() == 0 and //
reader_opts.get_byte_range_size() == 0;
}
/**
* @brief Get the byte range between record starts and ends starting from the given range.
*
* if get_byte_range_offset == 0, then we can skip the first delimiter search
* if get_byte_range_offset != 0, then we need to search for the first delimiter in given range.
* if not found, skip this chunk, if found, then search for first delimiter in next range until we
* find a delimiter. Use this as actual range for parsing.
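* Illustrative walk-through (the numbers are hypothetical, not from any real input):
* with byte_range_offset = 100 and byte_range_size = 50, the first '\n' found at or
* after offset 100 (say at 112) marks the start of the range; the search then moves
* forward in 50-byte chunks past offset 150 until the next '\n' (say at 173) or the
* end of the input, and bytes [112, 173) are what actually gets parsed. If no '\n'
* falls inside the first window, the chunk is skipped and an empty buffer returned.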
*
* @param sources Data sources to read from
* @param reader_opts JSON reader options with range offset and range size
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Byte range for parsing
*/
auto get_record_range_raw_input(host_span<std::unique_ptr<datasource>> sources,
json_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
auto buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
reader_opts.get_byte_range_offset(),
reader_opts.get_byte_range_size(),
stream);
if (should_load_whole_source(reader_opts)) return buffer;
auto first_delim_pos =
reader_opts.get_byte_range_offset() == 0 ? 0 : find_first_delimiter(buffer, '\n', stream);
if (first_delim_pos == -1) {
return rmm::device_uvector<char>{0, stream};
} else {
first_delim_pos = first_delim_pos + reader_opts.get_byte_range_offset();
// Find next delimiter
decltype(first_delim_pos) next_delim_pos = -1;
auto const total_source_size = sources_size(sources, 0, 0);
auto current_offset = reader_opts.get_byte_range_offset() + reader_opts.get_byte_range_size();
while (current_offset < total_source_size and next_delim_pos == -1) {
buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
current_offset,
reader_opts.get_byte_range_size(),
stream);
next_delim_pos = find_first_delimiter(buffer, '\n', stream);
if (next_delim_pos == -1) { current_offset += reader_opts.get_byte_range_size(); }
}
if (next_delim_pos == -1) {
next_delim_pos = total_source_size;
} else {
next_delim_pos = next_delim_pos + current_offset;
}
return ingest_raw_input(sources,
reader_opts.get_compression(),
first_delim_pos,
next_delim_pos - first_delim_pos,
stream);
}
}
table_with_metadata read_json(host_span<std::unique_ptr<datasource>> sources,
json_reader_options const& reader_opts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (reader_opts.is_enabled_legacy()) {
return legacy::read_json(sources, reader_opts, stream, mr);
}
if (not should_load_whole_source(reader_opts)) {
CUDF_EXPECTS(reader_opts.is_enabled_lines(),
"Specifying a byte range is supported only for JSON Lines");
CUDF_EXPECTS(sources.size() == 1,
"Specifying a byte range is supported only for a single source");
}
if (sources.size() > 1) {
CUDF_EXPECTS(reader_opts.get_compression() == compression_type::NONE,
"Multiple compressed inputs are not supported");
CUDF_EXPECTS(reader_opts.is_enabled_lines(),
"Multiple inputs are supported only for JSON Lines format");
}
auto const buffer = get_record_range_raw_input(sources, reader_opts, stream);
return device_parse_nested_json(buffer, reader_opts, stream, mr);
// For debug purposes, use host_parse_nested_json()
}
} // namespace cudf::io::json::detail
| 22876b1f1b1ec72cd40c1b4837797b79b4282084.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "read_json.hpp"
#include <io/comp/io_uncomp.hpp>
#include <io/json/legacy/read_json.hpp>
#include <io/json/nested_json.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scatter.h>
#include <numeric>
namespace cudf::io::json::detail {
size_t sources_size(host_span<std::unique_ptr<datasource>> const sources,
size_t range_offset,
size_t range_size)
{
return std::accumulate(sources.begin(), sources.end(), 0ul, [=](size_t sum, auto& source) {
auto const size = source->size();
// TODO take care of 0, 0, or *, 0 case.
return sum +
(range_size == 0 or range_offset + range_size > size ? size - range_offset : range_size);
});
}
rmm::device_uvector<char> ingest_raw_input(host_span<std::unique_ptr<datasource>> sources,
compression_type compression,
size_t range_offset,
size_t range_size,
rmm::cuda_stream_view stream)
{
CUDF_FUNC_RANGE();
// We append a line delimiter between two files to make sure the last line of file i and the first
// line of file i+1 don't end up on the same JSON line, if file i does not already end with a line
// delimiter.
auto constexpr num_delimiter_chars = 1;
auto const num_extra_delimiters = num_delimiter_chars * (sources.size() - 1);
// Iterate through the user defined sources and read the contents into the local buffer
auto const total_source_size =
sources_size(sources, range_offset, range_size) + num_extra_delimiters;
if (compression == compression_type::NONE) {
std::vector<size_type> delimiter_map{};
delimiter_map.reserve(sources.size());
auto d_buffer = rmm::device_uvector<char>(total_source_size, stream);
size_t bytes_read = 0;
std::vector<std::unique_ptr<datasource::buffer>> h_buffers;
for (auto const& source : sources) {
if (!source->is_empty()) {
auto data_size = (range_size != 0) ? range_size : source->size();
auto destination = reinterpret_cast<uint8_t*>(d_buffer.data()) + bytes_read;
if (source->is_device_read_preferred(data_size)) {
bytes_read += source->device_read(range_offset, data_size, destination, stream);
} else {
h_buffers.emplace_back(source->host_read(range_offset, data_size));
auto const& h_buffer = h_buffers.back();
CUDF_CUDA_TRY(cudaMemcpyAsync(
destination, h_buffer->data(), h_buffer->size(), cudaMemcpyDefault, stream.value()));
bytes_read += h_buffer->size();
}
delimiter_map.push_back(bytes_read);
bytes_read += num_delimiter_chars;
}
}
// If this is a multi-file source, we scatter the JSON line delimiters between files
if (sources.size() > 1) {
static_assert(num_delimiter_chars == 1,
"Currently only single-character delimiters are supported");
auto const delimiter_source = thrust::make_constant_iterator('\n');
auto const d_delimiter_map = cudf::detail::make_device_uvector_async(
host_span<size_type const>{delimiter_map.data(), delimiter_map.size() - 1},
stream,
rmm::mr::get_current_device_resource());
thrust::scatter(rmm::exec_policy_nosync(stream),
delimiter_source,
delimiter_source + d_delimiter_map.size(),
d_delimiter_map.data(),
d_buffer.data());
}
stream.synchronize();
return d_buffer;
} else {
auto buffer = std::vector<uint8_t>(total_source_size);
// Single read because only a single compressed source is supported
// Reading to host because decompression of a single block is much faster on the CPU
sources[0]->host_read(range_offset, total_source_size, buffer.data());
auto const uncomp_data = decompress(compression, buffer);
return cudf::detail::make_device_uvector_sync(
host_span<char const>{reinterpret_cast<char const*>(uncomp_data.data()), uncomp_data.size()},
stream,
rmm::mr::get_current_device_resource());
}
}
size_type find_first_delimiter_in_chunk(host_span<std::unique_ptr<cudf::io::datasource>> sources,
json_reader_options const& reader_opts,
char const delimiter,
rmm::cuda_stream_view stream)
{
auto const buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
reader_opts.get_byte_range_offset(),
reader_opts.get_byte_range_size(),
stream);
return find_first_delimiter(buffer, delimiter, stream);
}
bool should_load_whole_source(json_reader_options const& reader_opts)
{
return reader_opts.get_byte_range_offset() == 0 and //
reader_opts.get_byte_range_size() == 0;
}
/**
* @brief Get the byte range between record starts and ends starting from the given range.
*
* if get_byte_range_offset == 0, then we can skip the first delimiter search
* if get_byte_range_offset != 0, then we need to search for the first delimiter in given range.
* if not found, skip this chunk, if found, then search for first delimiter in next range until we
* find a delimiter. Use this as actual range for parsing.
*
* @param sources Data sources to read from
* @param reader_opts JSON reader options with range offset and range size
* @param stream CUDA stream used for device memory operations and kernel launches
* @return Byte range for parsing
*/
auto get_record_range_raw_input(host_span<std::unique_ptr<datasource>> sources,
json_reader_options const& reader_opts,
rmm::cuda_stream_view stream)
{
auto buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
reader_opts.get_byte_range_offset(),
reader_opts.get_byte_range_size(),
stream);
if (should_load_whole_source(reader_opts)) return buffer;
auto first_delim_pos =
reader_opts.get_byte_range_offset() == 0 ? 0 : find_first_delimiter(buffer, '\n', stream);
if (first_delim_pos == -1) {
return rmm::device_uvector<char>{0, stream};
} else {
first_delim_pos = first_delim_pos + reader_opts.get_byte_range_offset();
// Find next delimiter
decltype(first_delim_pos) next_delim_pos = -1;
auto const total_source_size = sources_size(sources, 0, 0);
auto current_offset = reader_opts.get_byte_range_offset() + reader_opts.get_byte_range_size();
while (current_offset < total_source_size and next_delim_pos == -1) {
buffer = ingest_raw_input(sources,
reader_opts.get_compression(),
current_offset,
reader_opts.get_byte_range_size(),
stream);
next_delim_pos = find_first_delimiter(buffer, '\n', stream);
if (next_delim_pos == -1) { current_offset += reader_opts.get_byte_range_size(); }
}
if (next_delim_pos == -1) {
next_delim_pos = total_source_size;
} else {
next_delim_pos = next_delim_pos + current_offset;
}
return ingest_raw_input(sources,
reader_opts.get_compression(),
first_delim_pos,
next_delim_pos - first_delim_pos,
stream);
}
}
table_with_metadata read_json(host_span<std::unique_ptr<datasource>> sources,
json_reader_options const& reader_opts,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
if (reader_opts.is_enabled_legacy()) {
return legacy::read_json(sources, reader_opts, stream, mr);
}
if (not should_load_whole_source(reader_opts)) {
CUDF_EXPECTS(reader_opts.is_enabled_lines(),
"Specifying a byte range is supported only for JSON Lines");
CUDF_EXPECTS(sources.size() == 1,
"Specifying a byte range is supported only for a single source");
}
if (sources.size() > 1) {
CUDF_EXPECTS(reader_opts.get_compression() == compression_type::NONE,
"Multiple compressed inputs are not supported");
CUDF_EXPECTS(reader_opts.is_enabled_lines(),
"Multiple inputs are supported only for JSON Lines format");
}
auto const buffer = get_record_range_raw_input(sources, reader_opts, stream);
return device_parse_nested_json(buffer, reader_opts, stream, mr);
// For debug purposes, use host_parse_nested_json()
}
} // namespace cudf::io::json::detail
|
7d2dadc92de2c34e6f2e889d168516150c2d68c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
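/* Reference definition being matched (standard form):
   (a; q)_n = prod_{k=0}^{n-1} (1 - a q^k), with (a; q)_infinity its n -> infinity limit.
   Note that the loop below starts from Q = q (the k = 1 factor) and truncates after
   79 factors, so the k = 0 factor (1 - a) is not multiplied in here. */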
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
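/* thess and the the1..the4 variants below are truncated Jacobi triple-product forms;
   the product computed by thess matches the usual
     theta_3(z, q) = prod_{n>=1} (1 - q^{2n}) (1 + 2 q^{2n-1} cos(2z) + q^{4n-2})
   cut off after a fixed number of factors (10 here, 20 in thess4/thass). */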
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
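/* thetta below is the one-dimensional analogue of thete: a truncated theta series
     theta(z | tau) = sum_{n in Z} exp(i*pi*tau*n^2) * exp(2*i*n*z),
   summed here over n = -10..9. */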
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ hipComplex qexp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ hipComplex qExp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
// q-analogue sine/cosine built from the small (qexp) and large (qExp) q-exponentials;
// the second exponential takes the negated argument.
__device__ hipComplex sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex mone(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) - qexp(mone*z*aie,q))/doo;
return out;
}
__device__ hipComplex cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex mone(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) + qexp(mone*z*aie,q))/doo;
return out;
}
__device__ hipComplex Sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex mone(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) - qExp(mone*z*aie,q))/doo;
return out;
}
__device__ hipComplex Cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex mone(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) + qExp(mone*z*aie,q))/doo;
return out;
}
__device__ hipComplex asins(hipComplex z)
{
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
// *signum, but without a comparison, probably a saner way to do this? //
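// For reference, this is the standard real decomposition of the complex arcsine:
// with alpha, beta = (|z+1| +/- |z-1|) / 2,
// asin(z) = asin(beta) + i * sign(Im z) * ln(alpha + sqrt(alpha^2 - 1)).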
hipComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 2.0;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
hipComplex gren(2.0,0.0);
hipComplex next=flurn;
hipComplex current = cue;
hipComplex xnext = flurn;
hipComplex xcurrent = cue;
hipComplex tinny(.0001,0.0001);
float xa,xb,ya,yb,tta,ttb;
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
//tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s
// this is not terribly hard to do with cuda
// what we need:
// x' = x - y -> dx / dt = x - y
// y' = 1 - x^2 -> dy / dt = 1-x^2
// dy / dx = (dy / dt) / (dx/ dt)
// so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult
xa = cue.r;
ya = cue.i;
for(v=0;v<30;v++)
{
xb = LA*xa - ya;
yb = 1 - LB*cyl_bessel_i1f(ya/xa)-xa*xa;
xa = xb;
ya = yb;
}
q.r = xb;
q.i = yb;
cue = q;
cue.r = cue.r/norg(q);
cue.i = cue.i/norg(q);
cue = hilva(cue);
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 7d2dadc92de2c34e6f2e889d168516150c2d68c4.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
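// i.e. a truncated q-Pochhammer product (a; q)_inf = prod_{k>=0} (1 - a q^k);
// the loop below starts from Q = q, so it accumulates (1 - a q)(1 - a q^2)...(1 - a q^79).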
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
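// Moebius transform of the unit disk: z -> e^{i t} (z - a) / (1 - conj(a) z)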
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
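/* thess approximates Jacobi's theta_3 through its triple-product form,
   theta_3(z,q) = prod_{n>=1} (1 - q^{2n}) (1 + 2 q^{2n-1} cos(2z) + q^{4n-2}),
   truncated here to the first 10 factors. */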
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ cuComplex qexp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ cuComplex qExp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
__device__ cuComplex sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qexp(z*aie,q) -qexp(z*aie,q))/doo;
return out;
}
__device__ cuComplex cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qexp(z*aie,q) +qexp(z*aie,q))/doo;
return out;
}
__device__ cuComplex Sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qExp(z*aie,q) -qExp(z*aie,q))/doo;
return out;
}
__device__ cuComplex Cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qExp(z*aie,q) +qExp(z*aie,q))/doo;
return out;
}
__device__ cuComplex asins(cuComplex z)
{
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
// *signum, but without a comparison, probably a saner way to do this? //
cuComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 2.0;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
cuComplex gren(2.0,0.0);
cuComplex next=flurn;
cuComplex current = cue;
cuComplex xnext = flurn;
cuComplex xcurrent = cue;
cuComplex tinny(.0001,0.0001);
float xa,xb,ya,yb,tta,ttb;
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
//tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s
// this is not terribly hard to do with cuda
// what we need:
// x' = x - y -> dx / dt = x - y
// y' = 1 - x^2 -> dy / dt = 1-x^2
// dy / dx = (dy / dt) / (dx/ dt)
// so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult
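// A minimal illustration of that trick (sketch only; these values are not
// consumed by the warped iteration below): pack (dx/dt, dy/dt) into a complex
// number and normalize it onto the unit circle.
cuComplex dirf(cue.r - cue.i, 1.0f - cue.r*cue.r); // (x - y, 1 - x^2) at the current point
float dnorm = norg(dirf) + 1e-12f; // small offset avoids division by zero at fixed points
cuComplex udir(dirf.r/dnorm, dirf.i/dnorm); // unit complex number along the flow direction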
xa = cue.r;
ya = cue.i;
for(v=0;v<30;v++)
{
xb = LA*xa - ya;
yb = 1 - LB*cyl_bessel_i1f(ya/xa)-xa*xa;
xa = xb;
ya = yb;
}
q.r = xb;
q.i = yb;
cue = q;
cue.r = cue.r/norg(q);
cue.i = cue.i/norg(q);
cue = hilva(cue);
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
2c235c2b5f55ef1d7679af9b7c8b04abe82fa5dd.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <math.h>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "MiniWrapForCuda.h"
#include <ctime>
#define epsPsi 0.001f
#define _BLOCK_SIZE 32
#define a 0.1f
#define c 1.0f
#define g 9.8f
#define betta 0.003665f
#define tetta 1.85f
using namespace std;
// helper for measuring elapsed time
class _Time{
hipEvent_t Tn, Tk;
float time;
public:
_Time(){
hipEventCreate(&Tn);
hipEventCreate(&Tk);
}
~_Time(){
hipEventDestroy(Tn);
hipEventDestroy(Tk);
}
void tn(){
hipEventRecord(Tn, 0);
}
float tk(){
hipEventRecord(Tk, 0);
hipEventSynchronize(Tk);
hipEventElapsedTime(&time, Tn, Tk);
return time;
}
};
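// Usage sketch: _Time t; t.tn(); /* launch kernels */ float ms = t.tk();
// tk() returns the time elapsed between the two recorded events in milliseconds.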
// Helmholtz equation (upwind derivatives)
__global__ void kernel_gelmgolca(int X, int Y, double *w, double *wn, double *psi, double *ux, double *uy, double*Temp, double h, double tau, double nuM){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<X) && (j<Y)){
w[i] = -(psi[i + X] - psi[i]) / (h*h);
w[i + (Y - 1)*X] = -(psi[i + (Y - 2)*X] - psi[i + (Y - 1)*X]) / (h*h);
w[j*X] = 0;
w[j*X + (X - 1)] = w[j*X + (X - 2)];
}
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
float dux, duy;
if (ux[j*X + i] < 0)
dux = (w[j*X + i + 1] - w[j*X + i]) / h;
else
dux = (w[j*X + i] - w[j*X + i - 1]) / h;
if (uy[j*X + i] < 0)
duy = (w[(j + 1)*X + i] - w[j*X + i]) / h;
else
duy = (w[j*X + i] - w[(j - 1)*X + i]) / h;
wn[j*X + i] = w[j*X + i] + tau*(-ux[j*X + i] * dux - uy[j*X + i] * duy + nuM*
(w[j*X + i + 1] + w[j*X + i - 1] + w[(j + 1)*X + i] + w[(j - 1)*X + i] - 4 * w[j*X + i]) / (h*h)
- g*betta*Temp[j*X + i]);
}
}
// Poisson equation (over-relaxation method)
__global__ void kernel_puasson(int X, int Y, double *psi, double *w, int *pr, double h, double *psin){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
pr[j*X + i] = 0;
psin[j*X + i] = 0.25*(psi[j*X + i + 1] + psi[j*X + i - 1] +
psi[(j + 1)*X + i] + psi[(j - 1)*X + i] + h*h*w[j*X + i]);
if (fabs(psin[j*X + i] - psi[j*X + i]) >= epsPsi)
pr[j*X + i] = 1;
}
}
// compute the velocities
__global__ void kernel_skorosti(int X, int Y, double *psi, double *ux, double *uy, double h){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i>0) && (j>0) && (i<(X - 1)) && (j<(Y - 1))){
ux[j*X + i] = -(psi[(j + 1)*X + i + 1] + psi[(j + 1)*X + i - 1]
- psi[(j - 1)*X + i + 1] - psi[(j - 1)*X + i - 1]) / (4 * h);
uy[j*X + i] = (psi[(j + 1)*X + i + 1] - psi[(j + 1)*X + i - 1]
+ psi[(j - 1)*X + i + 1] - psi[(j - 1)*X + i - 1]) / (4 * h);
}
}
// copy the updated temperature back
__global__ void _kernel_pTemp(int X, int Y, int x0, int len, double *Temp, double *Tempn){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
Temp[j*X + i] = Tempn[j*X + i];
if ((i < x0) || (i >= x0 + len))
Temp[(Y - 1)*X + i] = Tempn[(Y - 1)*X + i];
Temp[i] = Tempn[i];
}
}
// copy the updated values back
__global__ void kernel_p(int X, int Y, double *psi, double *psin){
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
int j = blockIdx.y*blockDim.y + threadIdx.y + 1;
if ((i<(X - 1)) && (j<(Y - 1)))
psi[j*X + i] = psin[j*X + i];
}
// compute the temperature
__global__ void _kernel_temp(int X, int Y, int x0, int len, double *Ux, double *Uy, double *Temp, double *Tempn, double nuM, double h, double tau){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
Tempn[j*X + i] = Temp[j*X + i] + tau * (-(Ux[j*X + i] + abs(Ux[j*X + i])) / 2.0 * (Temp[j*X + i] - Temp[j*X + i - 1]) / h
- (Ux[j*X + i] - abs(Ux[j*X + i])) / 2.0 * (Temp[j*X + i + 1] - Temp[j*X + i]) / h
- (Uy[j*X + i] + abs(Uy[j*X + i])) / 2.0 * (Temp[j*X + i] - Temp[(j - 1)*X + i]) / h
- (Uy[j*X + i] - abs(Uy[j*X + i])) / 2.0 * (Temp[(j + 1)*X + i] - Temp[j*X + i]) / h
+ c*(nuM)* (Ux[j*X + i + 1] + Ux[j*X + i - 1] + Uy[(j + 1)*X + i] + Uy[(j - 1)*X + i] - 2 * Ux[j*X + i] - 2 * Uy[j*X + i]) /
(h * h));
Temp[j*X + X - 1] = Tempn[j*X + X - 2];
}
// temperature at the walls
if ((i<(X - 1)) && (i>0)){
// at the lower boundary
if ((i < x0) || (i >= x0 + len))
Tempn[(Y - 1)*X + i] = Temp[(Y - 1)*X + i] +
tau*a*a / (h*h)*
(Temp[(Y - 1)*X + i + 1] + Temp[(Y - 1)*X + i - 1] + Temp[(Y - 2)*X + i] - 4 * Temp[(Y - 1)*X + i]);
// at the upper boundary
Tempn[i] = Temp[i] + tau*a*a / (h*h)*(Temp[i + 1] + Temp[i - 1] + Temp[X + i] - 4 * Temp[i]);
}
}
double *_UxDev = NULL, *_UyDev = NULL, *_UxnDev = NULL, *_UynDev = NULL, *wDev = NULL, *wnDev = NULL, *psiDev = NULL, *psinDev = NULL, *_TempDev = NULL, *_TempnDev;
int *prDev = NULL;
int _X, _Y;
int _x0, _len;
double _tau, _h;
double _nuM, _ro;
int _sizef, sizei;
int _gridSizeX, _gridSizeY;
_Time* _timer;
double ComputeWPsi(ComputeOnCUDA::WPsi::HelmholtzCalcMethod hcm, ComputeOnCUDA::TurbulenceModel tm, double *Ux, double *Uy, double *Temp, double tmax) {
double t = 0;
double fulltime;
// determine the number of blocks and threads
dim3 threads(_BLOCK_SIZE, _BLOCK_SIZE);
dim3 blocks(_gridSizeX, _gridSizeY);
// copy values from host to device memory
hipMemcpy(_UxDev, Ux, _sizef, hipMemcpyHostToDevice);
hipMemcpy(_UyDev, Uy, _sizef, hipMemcpyHostToDevice);
hipMemcpy(_TempDev, Temp, _sizef, hipMemcpyHostToDevice);
bool flag = false;
int *pr = NULL;
pr = new int[_X*_Y];
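// One outer time step per loop pass: advance the temperature field, update the
// vorticity with the Helmholtz (transport) kernel, iterate the Poisson solve for
// the stream function until it converges, then recover the velocities from psi.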
do{
_kernel_temp << <blocks, threads >> >(_X, _Y, _x0, _len, _UxDev, _UyDev, _TempDev, _TempnDev, _nuM, _h, _tau);
_kernel_pTemp << <blocks, threads >> >(_X, _Y, _x0, _len, _TempDev, _TempnDev);
// compute the vorticity field
kernel_gelmgolca << <blocks, threads >> >(_X, _Y, wDev, wnDev, psiDev,
_UxDev, _UyDev, _TempDev, _h, _tau, _nuM);
kernel_p << <blocks, threads >> >(_X, _Y, wDev, wnDev);
// solve the Poisson equation until the required accuracy is reached
do {
flag = false;
// launch the device kernels
kernel_puasson << <blocks, threads >> >(_X, _Y, psiDev, wDev, prDev, _h,psinDev);
kernel_p << <blocks, threads >> >(_X, _Y, psiDev, psinDev);
// synchronize device and host
hipDeviceSynchronize();
// copy values from device to host memory
hipMemcpy(pr, prDev, sizei, hipMemcpyDeviceToHost);
for (int j = 1; j<_Y - 1; j++)
for (int i = 1; i<_X - 1; i++)
if (pr[j*_X + i] == 1){
flag = true;
j = _Y; i = _X;
}
} while (flag);
// compute the velocities
kernel_skorosti << <blocks, threads >> >(_X, _Y, psiDev, _UxDev, _UyDev, _h);
t += _tau;
} while (t <= tmax);
// synchronize device and host
hipDeviceSynchronize();
// copy values from device to host memory
hipMemcpy(Ux, _UxDev, _sizef, hipMemcpyDeviceToHost);
hipMemcpy(Uy, _UyDev, _sizef, hipMemcpyDeviceToHost);
hipMemcpy(Temp, _TempDev, _sizef, hipMemcpyDeviceToHost);
fulltime = _timer->tk();
delete[] pr;
return fulltime / 1000.0;
}
void ConstructorWPsi(double tau, double nuM, int x0, int len, double h, int X, int Y, double *Ux, double *Uy){
_tau = tau;
_nuM = nuM;
_x0 = x0;
_len = len;
_h = h;
_X = X;
_Y = Y;
double *psi = new double[X*Y]; // stream function
double *w = new double[X*Y]; // vorticity
_sizef = X*Y*sizeof(double);
sizei = X*Y*sizeof(int);
_timer = new _Time();
// initial conditions
for (int i = 0; i < X; i++)
for (int j = 0; j < Y; j++) {
psi[j * X + i] = 0.0;
w[j * X + i] = 0.0;
}
// stream function on the boundaries
for (int i = X - 2; i >= 0; i--){
if (i > x0 + len)
psi[i + (Y - 1)*X] = 0.0;
if ((i >= x0) && (i <= x0 + len))
psi[i + (Y - 1)*X] = psi[i + (Y - 1)*X + 1] + fabs(Uy[i + (Y - 1)*X]) * h;
if (i < x0)
psi[i + (Y - 1)*X] = psi[i + (Y - 1)*X + 1];
}
for (int j = Y - 2; j >= 0; j--)
psi[j*X] = psi[(j + 1)*X] + Ux[j*X] * h;
for (int i = 1; i<X; i++)
psi[i] = psi[i - 1];
for (int j = Y - 2; j >= 0; j--)
psi[j*X + (X - 1)] = psi[(j + 1)*X + (X - 1)] + Ux[j*X + (X - 1)] * h;
// determine the grid size
_gridSizeX = (X / _BLOCK_SIZE) + ((X % _BLOCK_SIZE) > 0 ? 1 : 0);
_gridSizeY = (Y / _BLOCK_SIZE) + ((Y % _BLOCK_SIZE) > 0 ? 1 : 0);
// allocate memory on the device
hipMalloc((void**)&_UxDev, _sizef);
hipMalloc((void**)&_UxnDev, _sizef);
hipMalloc((void**)&_UyDev, _sizef);
hipMalloc((void**)&_UynDev, _sizef);
hipMalloc((void**)&psiDev, _sizef);
hipMalloc((void**)&psinDev, _sizef);
hipMalloc((void**)&wDev, _sizef);
hipMalloc((void**)&wnDev, _sizef);
hipMalloc((void**)&prDev, sizei);
hipMalloc((void**)&_TempDev, _sizef);
hipMalloc((void**)&_TempnDev, _sizef);
// start timing the computation
_timer->tn();
hipMemcpy(wDev, w, _sizef, hipMemcpyHostToDevice);
hipMemcpy(psiDev, psi, _sizef, hipMemcpyHostToDevice);
}
void DestructorWPsi() {
hipFree(_UxDev);
hipFree(_UxnDev);
hipFree(_UyDev);
hipFree(_UynDev);
hipFree(wDev);
hipFree(wnDev);
hipFree(psiDev);
hipFree(psinDev);
hipFree(prDev);
hipFree(_TempDev);
hipFree(_TempnDev);
} | 2c235c2b5f55ef1d7679af9b7c8b04abe82fa5dd.cu | #pragma once
#include <math.h>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "MiniWrapForCuda.h"
#include <ctime>
#define epsPsi 0.001f
#define _BLOCK_SIZE 32
#define a 0.1f
#define c 1.0f
#define g 9.8f
#define betta 0.003665f
#define tetta 1.85f
using namespace std;
// helper for measuring elapsed time
class _Time{
cudaEvent_t Tn, Tk;
float time;
public:
_Time(){
cudaEventCreate(&Tn);
cudaEventCreate(&Tk);
}
~_Time(){
cudaEventDestroy(Tn);
cudaEventDestroy(Tk);
}
void tn(){
cudaEventRecord(Tn, 0);
}
float tk(){
cudaEventRecord(Tk, 0);
cudaEventSynchronize(Tk);
cudaEventElapsedTime(&time, Tn, Tk);
return time;
}
};
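// Usage sketch: _Time t; t.tn(); /* launch kernels */ float ms = t.tk();
// tk() returns the time elapsed between the two recorded events in milliseconds.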
//Helmholtz equation (upwind derivatives)
__global__ void kernel_gelmgolca(int X, int Y, double *w, double *wn, double *psi, double *ux, double *uy, double*Temp, double h, double tau, double nuM){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<X) && (j<Y)){
w[i] = -(psi[i + X] - psi[i]) / (h*h);
w[i + (Y - 1)*X] = -(psi[i + (Y - 2)*X] - psi[i + (Y - 1)*X]) / (h*h);
w[j*X] = 0;
w[j*X + (X - 1)] = w[j*X + (X - 2)];
}
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
float dux, duy;
if (ux[j*X + i] < 0)
dux = (w[j*X + i + 1] - w[j*X + i]) / h;
else
dux = (w[j*X + i] - w[j*X + i - 1]) / h;
if (uy[j*X + i] < 0)
duy = (w[(j + 1)*X + i] - w[j*X + i]) / h;
else
duy = (w[j*X + i] - w[(j - 1)*X + i]) / h;
wn[j*X + i] = w[j*X + i] + tau*(-ux[j*X + i] * dux - uy[j*X + i] * duy + nuM*
(w[j*X + i + 1] + w[j*X + i - 1] + w[(j + 1)*X + i] + w[(j - 1)*X + i] - 4 * w[j*X + i]) / (h*h)
- g*betta*Temp[j*X + i]);
}
}
//Poisson equation (over-relaxation method)
__global__ void kernel_puasson(int X, int Y, double *psi, double *w, int *pr, double h, double *psin){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
pr[j*X + i] = 0;
psin[j*X + i] = 0.25*(psi[j*X + i + 1] + psi[j*X + i - 1] +
psi[(j + 1)*X + i] + psi[(j - 1)*X + i] + h*h*w[j*X + i]);
if (fabs(psin[j*X + i] - psi[j*X + i]) >= epsPsi)
pr[j*X + i] = 1;
}
}
//compute the velocities
__global__ void kernel_skorosti(int X, int Y, double *psi, double *ux, double *uy, double h){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i>0) && (j>0) && (i<(X - 1)) && (j<(Y - 1))){
ux[j*X + i] = -(psi[(j + 1)*X + i + 1] + psi[(j + 1)*X + i - 1]
- psi[(j - 1)*X + i + 1] - psi[(j - 1)*X + i - 1]) / (4 * h);
uy[j*X + i] = (psi[(j + 1)*X + i + 1] - psi[(j + 1)*X + i - 1]
+ psi[(j - 1)*X + i + 1] - psi[(j - 1)*X + i - 1]) / (4 * h);
}
}
//copy the updated temperature back
__global__ void _kernel_pTemp(int X, int Y, int x0, int len, double *Temp, double *Tempn){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
Temp[j*X + i] = Tempn[j*X + i];
if ((i < x0) || (i >= x0 + len))
Temp[(Y - 1)*X + i] = Tempn[(Y - 1)*X + i];
Temp[i] = Tempn[i];
}
}
//copy the updated values back
__global__ void kernel_p(int X, int Y, double *psi, double *psin){
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
int j = blockIdx.y*blockDim.y + threadIdx.y + 1;
if ((i<(X - 1)) && (j<(Y - 1)))
psi[j*X + i] = psin[j*X + i];
}
//compute the temperature
__global__ void _kernel_temp(int X, int Y, int x0, int len, double *Ux, double *Uy, double *Temp, double *Tempn, double nuM, double h, double tau){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<(X - 1)) && (j<(Y - 1)) && (i>0) && (j>0)){
Tempn[j*X + i] = Temp[j*X + i] + tau * (-(Ux[j*X + i] + abs(Ux[j*X + i])) / 2.0 * (Temp[j*X + i] - Temp[j*X + i - 1]) / h
- (Ux[j*X + i] - abs(Ux[j*X + i])) / 2.0 * (Temp[j*X + i + 1] - Temp[j*X + i]) / h
- (Uy[j*X + i] + abs(Uy[j*X + i])) / 2.0 * (Temp[j*X + i] - Temp[(j - 1)*X + i]) / h
- (Uy[j*X + i] - abs(Uy[j*X + i])) / 2.0 * (Temp[(j + 1)*X + i] - Temp[j*X + i]) / h
+ c*(nuM)* (Ux[j*X + i + 1] + Ux[j*X + i - 1] + Uy[(j + 1)*X + i] + Uy[(j - 1)*X + i] - 2 * Ux[j*X + i] - 2 * Uy[j*X + i]) /
(h * h));
Temp[j*X + X - 1] = Tempn[j*X + X - 2];
}
//temperature at the walls
if ((i<(X - 1)) && (i>0)){
//at the lower boundary
if ((i < x0) || (i >= x0 + len))
Tempn[(Y - 1)*X + i] = Temp[(Y - 1)*X + i] +
tau*a*a / (h*h)*
(Temp[(Y - 1)*X + i + 1] + Temp[(Y - 1)*X + i - 1] + Temp[(Y - 2)*X + i] - 4 * Temp[(Y - 1)*X + i]);
//at the upper boundary
Tempn[i] = Temp[i] + tau*a*a / (h*h)*(Temp[i + 1] + Temp[i - 1] + Temp[X + i] - 4 * Temp[i]);
}
}
double *_UxDev = NULL, *_UyDev = NULL, *_UxnDev = NULL, *_UynDev = NULL, *wDev = NULL, *wnDev = NULL, *psiDev = NULL, *psinDev = NULL, *_TempDev = NULL, *_TempnDev;
int *prDev = NULL;
int _X, _Y;
int _x0, _len;
double _tau, _h;
double _nuM, _ro;
int _sizef, sizei;
int _gridSizeX, _gridSizeY;
_Time* _timer;
double ComputeWPsi(ComputeOnCUDA::WPsi::HelmholtzCalcMethod hcm, ComputeOnCUDA::TurbulenceModel tm, double *Ux, double *Uy, double *Temp, double tmax) {
double t = 0;
double fulltime;
//determine the number of blocks and threads
dim3 threads(_BLOCK_SIZE, _BLOCK_SIZE);
dim3 blocks(_gridSizeX, _gridSizeY);
//copy values from host to device memory
cudaMemcpy(_UxDev, Ux, _sizef, cudaMemcpyHostToDevice);
cudaMemcpy(_UyDev, Uy, _sizef, cudaMemcpyHostToDevice);
cudaMemcpy(_TempDev, Temp, _sizef, cudaMemcpyHostToDevice);
bool flag = false;
int *pr = NULL;
pr = new int[_X*_Y];
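// One outer time step per loop pass: advance the temperature field, update the
// vorticity with the Helmholtz (transport) kernel, iterate the Poisson solve for
// the stream function until it converges, then recover the velocities from psi.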
do{
_kernel_temp << <blocks, threads >> >(_X, _Y, _x0, _len, _UxDev, _UyDev, _TempDev, _TempnDev, _nuM, _h, _tau);
_kernel_pTemp << <blocks, threads >> >(_X, _Y, _x0, _len, _TempDev, _TempnDev);
//compute the vorticity field
kernel_gelmgolca << <blocks, threads >> >(_X, _Y, wDev, wnDev, psiDev,
_UxDev, _UyDev, _TempDev, _h, _tau, _nuM);
kernel_p << <blocks, threads >> >(_X, _Y, wDev, wnDev);
//solve the Poisson equation until the required accuracy is reached
do {
flag = false;
//launch the device kernels
kernel_puasson << <blocks, threads >> >(_X, _Y, psiDev, wDev, prDev, _h,psinDev);
kernel_p << <blocks, threads >> >(_X, _Y, psiDev, psinDev);
//synchronize device and host
cudaDeviceSynchronize();
//copy values from device to host memory
cudaMemcpy(pr, prDev, sizei, cudaMemcpyDeviceToHost);
for (int j = 1; j<_Y - 1; j++)
for (int i = 1; i<_X - 1; i++)
if (pr[j*_X + i] == 1){
flag = true;
j = _Y; i = _X;
}
} while (flag);
//compute the velocities
kernel_skorosti << <blocks, threads >> >(_X, _Y, psiDev, _UxDev, _UyDev, _h);
t += _tau;
} while (t <= tmax);
//synchronize device and host
cudaThreadSynchronize();
//copy values from device to host memory
cudaMemcpy(Ux, _UxDev, _sizef, cudaMemcpyDeviceToHost);
cudaMemcpy(Uy, _UyDev, _sizef, cudaMemcpyDeviceToHost);
cudaMemcpy(Temp, _TempDev, _sizef, cudaMemcpyDeviceToHost);
fulltime = _timer->tk();
delete[] pr;
return fulltime / 1000.0;
}
void ConstructorWPsi(double tau, double nuM, int x0, int len, double h, int X, int Y, double *Ux, double *Uy){
_tau = tau;
_nuM = nuM;
_x0 = x0;
_len = len;
_h = h;
_X = X;
_Y = Y;
double *psi = new double[X*Y]; // stream function
double *w = new double[X*Y]; // vorticity
_sizef = X*Y*sizeof(double);
sizei = X*Y*sizeof(int);
_timer = new _Time();
//initial conditions
for (int i = 0; i < X; i++)
for (int j = 0; j < Y; j++) {
psi[j * X + i] = 0.0;
w[j * X + i] = 0.0;
}
//stream function on the boundaries
for (int i = X - 2; i >= 0; i--){
if (i > x0 + len)
psi[i + (Y - 1)*X] = 0.0;
if ((i >= x0) && (i <= x0 + len))
psi[i + (Y - 1)*X] = psi[i + (Y - 1)*X + 1] + fabs(Uy[i + (Y - 1)*X]) * h;
if (i < x0)
psi[i + (Y - 1)*X] = psi[i + (Y - 1)*X + 1];
}
for (int j = Y - 2; j >= 0; j--)
psi[j*X] = psi[(j + 1)*X] + Ux[j*X] * h;
for (int i = 1; i<X; i++)
psi[i] = psi[i - 1];
for (int j = Y - 2; j >= 0; j--)
psi[j*X + (X - 1)] = psi[(j + 1)*X + (X - 1)] + Ux[j*X + (X - 1)] * h;
//determine the grid size
_gridSizeX = (X / _BLOCK_SIZE) + ((X % _BLOCK_SIZE) > 0 ? 1 : 0);
_gridSizeY = (Y / _BLOCK_SIZE) + ((Y % _BLOCK_SIZE) > 0 ? 1 : 0);
//allocate memory on the device
cudaMalloc((void**)&_UxDev, _sizef);
cudaMalloc((void**)&_UxnDev, _sizef);
cudaMalloc((void**)&_UyDev, _sizef);
cudaMalloc((void**)&_UynDev, _sizef);
cudaMalloc((void**)&psiDev, _sizef);
cudaMalloc((void**)&psinDev, _sizef);
cudaMalloc((void**)&wDev, _sizef);
cudaMalloc((void**)&wnDev, _sizef);
cudaMalloc((void**)&prDev, sizei);
cudaMalloc((void**)&_TempDev, _sizef);
cudaMalloc((void**)&_TempnDev, _sizef);
//start timing the computation
_timer->tn();
cudaMemcpy(wDev, w, _sizef, cudaMemcpyHostToDevice);
cudaMemcpy(psiDev, psi, _sizef, cudaMemcpyHostToDevice);
}
void DestructorWPsi() {
cudaFree(_UxDev);
cudaFree(_UxnDev);
cudaFree(_UyDev);
cudaFree(_UynDev);
cudaFree(wDev);
cudaFree(wnDev);
cudaFree(psiDev);
cudaFree(psinDev);
cudaFree(prDev);
cudaFree(_TempDev);
cudaFree(_TempnDev);
} |
d02504d5c49e655b156b4a8b8b622d98cfa2848b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#ifndef WIN64
#define EIGEN_DONT_ALIGN_STATICALLY
#endif
#include <Eigen/Dense>
#include <Eigen/Cholesky>
#include <sophus/se3.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
texture<float,2,hipReadModeElementType> texRef;
texture<float,2,hipReadModeElementType> texGradX;
texture<float,2,hipReadModeElementType> texGradY;
#define LVL 4
#define ITER 10
#include "aux.h"
#include "downsample.cuh"
#include "hostFunctions.hpp"
#include "pointCloud.cuh"
#include "deriveNumeric.cuh"
#include "residual.cuh"
#include "tum_benchmark.hpp"
//#include "save_ply.hpp"
#define STR1(x) #x
#define STR(x) STR1(x)
typedef Eigen::Matrix<float, 6, 6> Mat6f;
typedef Eigen::Matrix<float, 6, 1> Vec6f;
void align(const cv::Mat &depthRefIn, const cv::Mat &grayRefIn, const cv::Mat &depthCurIn, const cv::Mat &grayCurIn, Vec6f& xi)
{
// get image dimensions
int w = grayRefIn.cols; // width
int h = grayRefIn.rows; // height
// initialize intrinsic matrix
Eigen::Matrix3f K;
K << 517.3, 0.0, 318.6,
0.0, 516.5, 255.3,
0.0, 0.0, 1.0;
// initial pose
Eigen::Matrix3f rot;
Eigen::Vector3f t;
convertSE3ToTf(xi, rot, t);
//Saving the finest level of images
std::vector<Eigen::Matrix3f> kPyramid;
kPyramid.push_back(K);
std::vector<cv::Mat> grayRefPyramid;
grayRefPyramid.push_back(grayRefIn);
std::vector<cv::Mat> depthRefPyramid;
depthRefPyramid.push_back(depthRefIn);
std::vector<cv::Mat> grayCurPyramid;
grayCurPyramid.push_back(grayCurIn);
std::vector<cv::Mat> depthCurPyramid;
depthCurPyramid.push_back(depthCurIn);
// initialize cuda context
hipDeviceSynchronize(); CUDA_CHECK;
// copy data to device
size_t nbytes = w*h*sizeof(float);
float *d_imgIn;
hipMalloc(&d_imgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_imgIn, (void*)grayCurIn.data, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
float *d_refimgIn;
hipMalloc(&d_refimgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_refimgIn, (void*)grayRefIn.data,nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
float *d_depthImgIn;
hipMalloc(&d_depthImgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_depthImgIn, (void*)depthCurIn.data, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
float *d_refdepthImgIn;
hipMalloc(&d_refdepthImgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_refdepthImgIn, (void*)depthRefIn.data, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
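// Build an LVL-level coarse-to-fine pyramid: each pass halves the resolution of the
// gray and depth images and rescales the intrinsics via scaleIntrinsic().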
for(int i = 1; i <LVL ; i++)
{
w = w/2, h = h/2;
// Output graycurr image
size_t nbytes_scaled = w*h*sizeof(float);
float *d_imgOut;
hipMalloc(&d_imgOut, nbytes_scaled); CUDA_CHECK;
hipMemset(d_imgOut, 0, nbytes_scaled); CUDA_CHECK;
// Output gray reference image
float *d_refimgOut;
hipMalloc(&d_refimgOut, nbytes_scaled); CUDA_CHECK;
hipMemset(d_refimgOut, 0, nbytes_scaled); CUDA_CHECK;
//Output depth current image
float *d_depthImgOut;
hipMalloc(&d_depthImgOut, nbytes_scaled); CUDA_CHECK;
hipMemset(d_depthImgOut, 0, nbytes_scaled); CUDA_CHECK;
//Output refernce depth image
float *d_refdepthImgOut;
hipMalloc(&d_refdepthImgOut, nbytes_scaled); CUDA_CHECK;
hipMemset(d_refdepthImgOut, 0, nbytes_scaled); CUDA_CHECK;
// execute kernel
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w+block.x-1)/block.x, (h+block.y-1)/block.y, 1);
hipLaunchKernelGGL(( downSampleGray) , dim3(grid),dim3(block), 0, 0, d_imgOut,d_imgIn,h,w); CUDA_CHECK; //Gray current image
hipLaunchKernelGGL(( downSampleGray) , dim3(grid),dim3(block), 0, 0, d_refimgOut,d_refimgIn,h,w); CUDA_CHECK; //Reference image
hipLaunchKernelGGL(( downSampleDepth) , dim3(grid),dim3(block), 0, 0, d_depthImgOut,d_depthImgIn,h,w); //Current depth image
hipLaunchKernelGGL(( downSampleDepth) , dim3(grid),dim3(block), 0, 0, d_refdepthImgOut,d_refdepthImgIn,h,w); //Reference depth image
K = scaleIntrinsic(K);
kPyramid.push_back(K);
hipDeviceSynchronize();
cv::Mat mOut(h,w,grayCurIn.type());
cv::Mat refmOut(h,w,grayCurIn.type());
cv::Mat depth_mOut(h,w,grayCurIn.type());
cv::Mat refdepth_mOut(h,w,grayCurIn.type());
// copy data back to host
hipMemcpy((void *)mOut.data, d_imgOut, nbytes_scaled , hipMemcpyDeviceToHost); CUDA_CHECK;
grayCurPyramid.push_back(mOut);
hipFree(d_imgOut); CUDA_CHECK;
hipMemcpy((void *)refmOut.data, d_refimgOut,nbytes_scaled , hipMemcpyDeviceToHost); CUDA_CHECK;
grayRefPyramid.push_back(refmOut);
hipFree(d_refimgOut); CUDA_CHECK;
hipMemcpy((void *)depth_mOut.data, d_depthImgOut, nbytes_scaled , hipMemcpyDeviceToHost); CUDA_CHECK;
depthCurPyramid.push_back(depth_mOut);
hipFree(d_depthImgOut); CUDA_CHECK;
hipMemcpy((void *)refdepth_mOut.data, d_refdepthImgOut, nbytes_scaled , hipMemcpyDeviceToHost); CUDA_CHECK;
depthRefPyramid.push_back(refdepth_mOut);
hipFree(d_refdepthImgOut); CUDA_CHECK;
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_refimgIn); CUDA_CHECK;
hipFree(d_depthImgIn); CUDA_CHECK;
hipFree(d_refdepthImgIn); CUDA_CHECK;
// copy current output to next input
hipMalloc(&d_depthImgIn, nbytes_scaled ); CUDA_CHECK;
hipMemcpy(d_depthImgIn, (void *)depth_mOut.data, nbytes_scaled , hipMemcpyHostToDevice); CUDA_CHECK;
hipMalloc(&d_refdepthImgIn, nbytes_scaled ); CUDA_CHECK;
hipMemcpy(d_refdepthImgIn, (void *)refdepth_mOut.data,nbytes_scaled , hipMemcpyHostToDevice); CUDA_CHECK;
hipMalloc(&d_refimgIn, nbytes_scaled ); CUDA_CHECK;
hipMemcpy(d_refimgIn, (void *)refmOut.data, nbytes_scaled , hipMemcpyHostToDevice); CUDA_CHECK;
hipMalloc(&d_imgIn, nbytes_scaled ); CUDA_CHECK;
hipMemcpy(d_imgIn, (void *)mOut.data, nbytes_scaled , hipMemcpyHostToDevice); CUDA_CHECK;
}
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_refimgIn); CUDA_CHECK;
hipFree(d_depthImgIn); CUDA_CHECK;
hipFree(d_refdepthImgIn); CUDA_CHECK;
for (int level = LVL; level>0 ; level--)
{
cv::Mat grayRef = grayRefPyramid[level-1];
cv::Mat depthRef = depthRefPyramid[level-1];
cv::Mat grayCur = grayCurPyramid[level-1];
cv::Mat depthCur = depthCurPyramid[level-1];
Eigen::Matrix3f kLevel = kPyramid[level-1];
// get image dimensions
int w = grayRef.cols; // width
int h = grayRef.rows; // height
// initialize cuda context
hipDeviceSynchronize(); CUDA_CHECK;
// copy data to device
// Current grayscale image
size_t nbytes = w*h*sizeof(float);
float *d_currImg;
hipMalloc(&d_currImg, nbytes); CUDA_CHECK;
hipMemcpy(d_currImg, (void*)grayCur.data, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
// Reference grayscale image
float *d_refimgIn;
hipMalloc(&d_refimgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_refimgIn, (void*)grayRef.data, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
//Current depth image
float *d_depthImgIn;
hipMalloc(&d_depthImgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_depthImgIn, (void*)depthCur.data,nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
//Reference depth image
float *d_refdepthImgIn;
hipMalloc(&d_refdepthImgIn, nbytes); CUDA_CHECK;
hipMemcpy(d_refdepthImgIn, (void*)depthRef.data,nbytes,hipMemcpyHostToDevice); CUDA_CHECK;
//Residual Image
float *d_resImg;
hipMalloc(&d_resImg, nbytes); CUDA_CHECK;
hipMemset(d_resImg, 0, nbytes); CUDA_CHECK;
float *d_rot, *d_t;
float fx = kLevel(0,0);
float fy = kLevel(1,1);
float cx = kLevel(0,2);
float cy = kLevel(1,2);
int N = w*h;
float *JTJ = new float[(size_t)N*21];
float *JTB = new float[(size_t)N*6];
float *temp = new float[(size_t)N];
float *result = new float[(size_t)1];
float *A_interim = new float[(size_t)21];
float *b_interim = new float[(size_t)6];
Mat6f A = Mat6f::Zero();
Vec6f b = Vec6f::Zero();
float errLast = std::numeric_limits<float>::max();
for(int i = 0; i < ITER ; i++)
{
float *rot_data = rot.data();
float *t_data = t.data();
float *d_vx, *d_vy, *d_jacobif,*JtJ_final, *Jtb_final, *d_temp, *d_result;
size_t n_d_vx = (size_t)w*h;
size_t n_d_vy = (size_t)w*h;
hipMalloc(&d_jacobif, N*6*sizeof(float)); CUDA_CHECK;
hipMemset(d_jacobif, 0, N*6*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_temp, N*sizeof(float)); CUDA_CHECK;
hipMemset(d_temp, 0, N*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_result, N*sizeof(float)); CUDA_CHECK;
hipMemset(d_result, 0, N*sizeof(float)); CUDA_CHECK;
hipMalloc(&JtJ_final, N*21*sizeof(float)); CUDA_CHECK;
hipMemset(JtJ_final, 0, N*21*sizeof(float)); CUDA_CHECK;
hipMalloc(&Jtb_final, N*6*sizeof(float)); CUDA_CHECK;
hipMemset(Jtb_final, 0, N*6*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_vx, n_d_vx*sizeof(float)); CUDA_CHECK;
hipMemset(d_vx, 0, n_d_vx*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_vy, n_d_vy*sizeof(float)); CUDA_CHECK;
hipMemset(d_vy, 0, n_d_vy*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_rot, 9*sizeof(float)); CUDA_CHECK;
hipMemcpy(d_rot,rot_data,9*sizeof(float),hipMemcpyHostToDevice); CUDA_CHECK;
hipMalloc(&d_t, 3*sizeof(float)); CUDA_CHECK;
hipMemcpy(d_t,t_data,3*sizeof(float),hipMemcpyHostToDevice); CUDA_CHECK;
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w+block.x-1)/block.x, (h+block.y-1)/block.y, 1);
//Texture Memory
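// The current intensity image and its x/y gradients are bound to 2D textures so the
// warped lookups get hardware bilinear interpolation (hipFilterModeLinear) with
// clamped addressing at the image border.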
texRef.addressMode[0] = hipAddressModeClamp;
texRef.addressMode[1] = hipAddressModeClamp;
texRef.filterMode = hipFilterModeLinear;
texRef.normalized = false;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texRef, d_currImg, &desc, w, h, w*sizeof(d_currImg[0]));
texGradX.addressMode[0] = hipAddressModeClamp;
texGradX.addressMode[1] = hipAddressModeClamp;
texGradX.filterMode = hipFilterModeLinear;
texGradX.normalized = false;
hipChannelFormatDesc descX = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texGradX, d_vx, &descX, w, h, w*sizeof(d_vx[0]));
texGradY.addressMode[0] = hipAddressModeClamp;
texGradY.addressMode[1] = hipAddressModeClamp;
texGradY.filterMode = hipFilterModeLinear;
texGradY.normalized = false;
hipChannelFormatDesc descY = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texGradY, d_vy, &descY, w, h, w*sizeof(d_vy[0]));
//hipMemset(d_imgOut, 0, nbytes); CUDA_CHECK;
hipMemset(d_resImg, 0, nbytes); CUDA_CHECK;
hipLaunchKernelGGL(( calcErr) , dim3(grid),dim3(block), 0, 0, d_refimgIn,d_currImg,d_refdepthImgIn,d_resImg,d_rot,d_t,fx,fy,cx,cy,w,h);
CUDA_CHECK;
hipLaunchKernelGGL(( gradCompute) , dim3(grid),dim3(block), 0, 0, d_currImg,d_vx,d_vy,w,h); CUDA_CHECK;
hipLaunchKernelGGL(( deriveNumeric) , dim3(grid),dim3(block), 0, 0, d_vx,d_vy,d_refdepthImgIn,d_resImg,d_jacobif,w,h,fx,fy,cx,cy,d_rot,d_t,JtJ_final,Jtb_final);
CUDA_CHECK;
cv::Mat residualGPU(h,w,grayRef.type());
hipMemcpy((void *)residualGPU.data,d_resImg,N*sizeof(float),hipMemcpyDeviceToHost); CUDA_CHECK;
Eigen::VectorXf residual(N);
int idx = 0;
for(int i =0 ;i<w;i++)
{
for(int j =0 ;j<h;j++)
{
residual[idx] = residualGPU.at<float>(j,i); // at(row, col): j indexes rows (height), i indexes columns (width)
idx++;
}
}
dim3 block1 = dim3(128, 1, 1);
dim3 grid1 = dim3((N + block1.x -1)/block1.x,1,1);
size_t smBytes = block1.x * block1.y * block1.z * sizeof(float);
//Reduction for JtJ
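// JtJ_final holds, for every pixel, the 21 upper-triangular entries of J^T J; each
// entry is reduced over all N pixels with block_sum and the scalar result is copied
// back to assemble the symmetric 6x6 normal-equation matrix A below.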
float* ptrJtJ = JtJ_final;
for(int j = 0; j<21; j++)
{
hipLaunchKernelGGL(( block_sum) , dim3(grid1),dim3(block1),smBytes, 0, ptrJtJ,ptrJtJ,N);
for(int offset = block1.x / 2;offset > 0;offset >>= 1)
hipLaunchKernelGGL(( block_sum) , dim3(grid1),dim3(block1),smBytes, 0, ptrJtJ,ptrJtJ,N);
hipMemcpy(result,ptrJtJ,sizeof(float),hipMemcpyDeviceToHost); CUDA_CHECK;
A_interim[j]= result[0];
ptrJtJ = ptrJtJ + N;
}
int k = 0;
for(int i = 0; i<6; i++)
{
for(int j = i; j<6; j++)
{
A(i,j) = A(j,i) =A_interim[k];
k++;
}
}
//Reduction for Jtb
float *ptrJtb = Jtb_final;
for(int j = 0; j<6; j++)
{
hipLaunchKernelGGL(( block_sum) , dim3(grid1),dim3(block1),smBytes, 0, ptrJtb,ptrJtb,N);
for(int offset = block1.x / 2;offset > 0;offset >>= 1)
hipLaunchKernelGGL(( block_sum) , dim3(grid1),dim3(block1),smBytes, 0, ptrJtb,ptrJtb,N);
hipMemcpy(result,ptrJtb,sizeof(float),hipMemcpyDeviceToHost); CUDA_CHECK;
b_interim[j]= result[0];
ptrJtb = ptrJtb + N;
}
for(int i = 0; i<6; i++)
b(i) = b_interim[i];
// solve using Cholesky LDLT decomposition
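// Gauss-Newton step: with A ~ J^T J and b ~ J^T r from the reductions above,
// delta = -(J^T J)^{-1} J^T r, solved here via Eigen's LDLT factorization.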
Vec6f delta = -(A.ldlt().solve(b));
// update xi
xi = Sophus::SE3f::log(Sophus::SE3f::exp(delta)*Sophus::SE3f::exp(xi));
hipUnbindTexture(texRef);
hipUnbindTexture(texGradX);
hipUnbindTexture(texGradY);
hipFree(d_rot); CUDA_CHECK;
hipFree(d_t); CUDA_CHECK;
hipFree(d_vx); CUDA_CHECK;
hipFree(d_vy); CUDA_CHECK;
hipFree(d_jacobif); CUDA_CHECK;
hipFree(d_result); CUDA_CHECK;
hipFree(d_temp); CUDA_CHECK;
hipFree(JtJ_final); CUDA_CHECK;
hipFree(Jtb_final); CUDA_CHECK;
// print out final pose
convertSE3ToTf(xi, rot, t);
float error = (residual.cwiseProduct(residual)).mean();
if((error/errLast) > 0.995f)
break;
errLast = error;
}
delete[] JTJ;
delete[] JTB;
delete[] temp;
delete[] result;
delete[] A_interim;
delete[] b_interim;
hipFree(d_currImg); CUDA_CHECK;
hipFree(d_refimgIn); CUDA_CHECK;
hipFree(d_depthImgIn); CUDA_CHECK;
hipFree(d_refdepthImgIn); CUDA_CHECK;
hipFree(d_resImg); CUDA_CHECK;
}
}
int main(int argc, char *argv[])
{
std::string dataFolder = std::string(STR(DVO_CUDA_SOURCE_DIR)) + "/data/";
// load file names
std::string assocFile = dataFolder + "rgbd_assoc.txt";
std::vector<std::string> filesColor;
std::vector<std::string> filesDepth;
std::vector<double> timestampsDepth;
std::vector<double> timestampsColor;
if (!loadAssoc(assocFile, filesDepth, filesColor, timestampsDepth, timestampsColor))
{
std::cout << "Assoc file could not be loaded!" << std::endl;
return 1;
}
// initialize cuda context
hipDeviceSynchronize(); CUDA_CHECK;
int maxFrames = -1;
//maxFrames = 50;
// process frames
Eigen::Matrix4f absPose = Eigen::Matrix4f::Identity();
std::vector<Eigen::Matrix4f> poses;
std::vector<double> timestamps;
poses.push_back(absPose);
timestamps.push_back(timestampsDepth[0]);
cv::Mat colorPrev = loadColor(dataFolder + filesColor[0]);
cv::Mat grayPrev;
cv::cvtColor(colorPrev, grayPrev, CV_BGR2GRAY);
cv::Mat depthPrev = loadDepth(dataFolder + filesDepth[0]);
for (size_t i = 1; i < filesDepth.size() && (maxFrames < 0 || i < maxFrames); ++i)
{
// load input frame
std::string fileColor1 = filesColor[i];
std::string fileDepth1 = filesDepth[i];
double timeDepth1 = timestampsDepth[i];
cv::Mat color0 = colorPrev;
cv::Mat depth0 = depthPrev;
cv::Mat gray0 = grayPrev;
cv::Mat color1 = loadColor(dataFolder + fileColor1);
cv::Mat depth1 = loadDepth(dataFolder + fileDepth1);
cv::Mat gray1;
cv::cvtColor(color1, gray1, CV_BGR2GRAY);
cv::Mat grayRef = gray0;
cv::Mat depthRef = depth0;
cv::Mat grayCur = gray1;
cv::Mat depthCur = depth1;
// frame alignment
Vec6f xi = Vec6f::Zero();
Timer timer; timer.start();
align(depthRef, grayRef, depthCur, grayCur, xi);
timer.end(); float time = timer.get();
std::cout <<"Time : " << time*1000 << std::endl;
Eigen::Matrix3f rot;
Eigen::Vector3f t;
convertSE3ToTf(xi, rot, t);
std::cout << "pose (xi) between frames " << (i-1) << " and " << i << ": " << xi.transpose() << std::endl;
// concatenate poses
Eigen::Matrix4f relPose = Eigen::Matrix4f::Identity();
relPose.topLeftCorner(3,3) = rot;
relPose.topRightCorner(3,1) = t;
absPose = absPose * relPose.inverse();
poses.push_back(absPose);
timestamps.push_back(timeDepth1);
#if 0
// save frames as point cloud
rot = absPose.topLeftCorner(3,3);
t = absPose.topRightCorner(3,1);
cv::Mat vertexMap1;
depthToVertexMap(K.cast<double>(), depth1, vertexMap1);
transformVertexMap(rot.cast<double>(), t.cast<double>(), vertexMap1);
cv::Mat color1UC;
color1.convertTo(color1UC, CV_8UC3, 255.0f);
std::stringstream ss;
ss << dataFolder << "cloud_" << std::setw(4) << std::setfill('0') << i << ".ply";
savePlyFile(ss.str(), color1UC, vertexMap1);
#endif
colorPrev = color1;
depthPrev = depth1;
grayPrev = gray1;
}
// save poses
savePoses(dataFolder + "traj.txt", poses, timestamps);
// clean up
cv::destroyAllWindows();
std::cout << "Direct Image Alignment finished." << std::endl;
return 0;
}
| d02504d5c49e655b156b4a8b8b622d98cfa2848b.cu | #include <iostream>
#include <vector>
#ifndef WIN64
#define EIGEN_DONT_ALIGN_STATICALLY
#endif
#include <Eigen/Dense>
#include <Eigen/Cholesky>
#include <sophus/se3.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
texture<float,2,cudaReadModeElementType> texRef;
texture<float,2,cudaReadModeElementType> texGradX;
texture<float,2,cudaReadModeElementType> texGradY;
#define LVL 4
#define ITER 10
#include "aux.h"
#include "downsample.cuh"
#include "hostFunctions.hpp"
#include "pointCloud.cuh"
#include "deriveNumeric.cuh"
#include "residual.cuh"
#include "tum_benchmark.hpp"
//#include "save_ply.hpp"
#define STR1(x) #x
#define STR(x) STR1(x)
typedef Eigen::Matrix<float, 6, 6> Mat6f;
typedef Eigen::Matrix<float, 6, 1> Vec6f;
void align(const cv::Mat &depthRefIn, const cv::Mat &grayRefIn, const cv::Mat &depthCurIn, const cv::Mat &grayCurIn, Vec6f& xi)
{
// get image dimensions
int w = grayRefIn.cols; // width
int h = grayRefIn.rows; // height
// initialize intrinsic matrix
Eigen::Matrix3f K;
K << 517.3, 0.0, 318.6,
0.0, 516.5, 255.3,
0.0, 0.0, 1.0;
// initial pose
Eigen::Matrix3f rot;
Eigen::Vector3f t;
convertSE3ToTf(xi, rot, t);
//Saving the finest level of images
std::vector<Eigen::Matrix3f> kPyramid;
kPyramid.push_back(K);
std::vector<cv::Mat> grayRefPyramid;
grayRefPyramid.push_back(grayRefIn);
std::vector<cv::Mat> depthRefPyramid;
depthRefPyramid.push_back(depthRefIn);
std::vector<cv::Mat> grayCurPyramid;
grayCurPyramid.push_back(grayCurIn);
std::vector<cv::Mat> depthCurPyramid;
depthCurPyramid.push_back(depthCurIn);
// initialize cuda context
cudaDeviceSynchronize(); CUDA_CHECK;
// copy data to device
size_t nbytes = w*h*sizeof(float);
float *d_imgIn;
cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_imgIn, (void*)grayCurIn.data, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
float *d_refimgIn;
cudaMalloc(&d_refimgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_refimgIn, (void*)grayRefIn.data,nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
float *d_depthImgIn;
cudaMalloc(&d_depthImgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_depthImgIn, (void*)depthCurIn.data, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
float *d_refdepthImgIn;
cudaMalloc(&d_refdepthImgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_refdepthImgIn, (void*)depthRefIn.data, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
for(int i = 1; i <LVL ; i++)
{
w = w/2, h = h/2;
// Output graycurr image
size_t nbytes_scaled = w*h*sizeof(float);
float *d_imgOut;
cudaMalloc(&d_imgOut, nbytes_scaled); CUDA_CHECK;
cudaMemset(d_imgOut, 0, nbytes_scaled); CUDA_CHECK;
// Output gray reference image
float *d_refimgOut;
cudaMalloc(&d_refimgOut, nbytes_scaled); CUDA_CHECK;
cudaMemset(d_refimgOut, 0, nbytes_scaled); CUDA_CHECK;
//Output depth current image
float *d_depthImgOut;
cudaMalloc(&d_depthImgOut, nbytes_scaled); CUDA_CHECK;
cudaMemset(d_depthImgOut, 0, nbytes_scaled); CUDA_CHECK;
//Output reference depth image
float *d_refdepthImgOut;
cudaMalloc(&d_refdepthImgOut, nbytes_scaled); CUDA_CHECK;
cudaMemset(d_refdepthImgOut, 0, nbytes_scaled); CUDA_CHECK;
// execute kernel
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w+block.x-1)/block.x, (h+block.y-1)/block.y, 1);
downSampleGray <<<grid,block>>> (d_imgOut,d_imgIn,h,w); CUDA_CHECK; //Gray current image
downSampleGray <<<grid,block>>> (d_refimgOut,d_refimgIn,h,w); CUDA_CHECK; //Reference image
downSampleDepth <<<grid,block>>> (d_depthImgOut,d_depthImgIn,h,w); //Current depth image
downSampleDepth <<<grid,block>>> (d_refdepthImgOut,d_refdepthImgIn,h,w); //Reference depth image
K = scaleIntrinsic(K);
kPyramid.push_back(K);
cudaDeviceSynchronize();
cv::Mat mOut(h,w,grayCurIn.type());
cv::Mat refmOut(h,w,grayCurIn.type());
cv::Mat depth_mOut(h,w,grayCurIn.type());
cv::Mat refdepth_mOut(h,w,grayCurIn.type());
// copy data back to host
cudaMemcpy((void *)mOut.data, d_imgOut, nbytes_scaled , cudaMemcpyDeviceToHost); CUDA_CHECK;
grayCurPyramid.push_back(mOut);
cudaFree(d_imgOut); CUDA_CHECK;
cudaMemcpy((void *)refmOut.data, d_refimgOut,nbytes_scaled , cudaMemcpyDeviceToHost); CUDA_CHECK;
grayRefPyramid.push_back(refmOut);
cudaFree(d_refimgOut); CUDA_CHECK;
cudaMemcpy((void *)depth_mOut.data, d_depthImgOut, nbytes_scaled , cudaMemcpyDeviceToHost); CUDA_CHECK;
depthCurPyramid.push_back(depth_mOut);
cudaFree(d_depthImgOut); CUDA_CHECK;
cudaMemcpy((void *)refdepth_mOut.data, d_refdepthImgOut, nbytes_scaled , cudaMemcpyDeviceToHost); CUDA_CHECK;
depthRefPyramid.push_back(refdepth_mOut);
cudaFree(d_refdepthImgOut); CUDA_CHECK;
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_refimgIn); CUDA_CHECK;
cudaFree(d_depthImgIn); CUDA_CHECK;
cudaFree(d_refdepthImgIn); CUDA_CHECK;
// copy current output to next input
cudaMalloc(&d_depthImgIn, nbytes_scaled ); CUDA_CHECK;
cudaMemcpy(d_depthImgIn, (void *)depth_mOut.data, nbytes_scaled , cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMalloc(&d_refdepthImgIn, nbytes_scaled ); CUDA_CHECK;
cudaMemcpy(d_refdepthImgIn, (void *)refdepth_mOut.data,nbytes_scaled , cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMalloc(&d_refimgIn, nbytes_scaled ); CUDA_CHECK;
cudaMemcpy(d_refimgIn, (void *)refmOut.data, nbytes_scaled , cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMalloc(&d_imgIn, nbytes_scaled ); CUDA_CHECK;
cudaMemcpy(d_imgIn, (void *)mOut.data, nbytes_scaled , cudaMemcpyHostToDevice); CUDA_CHECK;
}
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_refimgIn); CUDA_CHECK;
cudaFree(d_depthImgIn); CUDA_CHECK;
cudaFree(d_refdepthImgIn); CUDA_CHECK;
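// Coarse-to-fine alignment: start at the coarsest pyramid level and refine the pose on progressively finer levels.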
for (int level = LVL; level>0 ; level--)
{
cv::Mat grayRef = grayRefPyramid[level-1];
cv::Mat depthRef = depthRefPyramid[level-1];
cv::Mat grayCur = grayCurPyramid[level-1];
cv::Mat depthCur = depthCurPyramid[level-1];
Eigen::Matrix3f kLevel = kPyramid[level-1];
// get image dimensions
int w = grayRef.cols; // width
int h = grayRef.rows; // height
// initialize cuda context
cudaDeviceSynchronize(); CUDA_CHECK;
// copy data to device
// Current grayscale image
float nbytes = w*h*sizeof(float);
float *d_currImg;
cudaMalloc(&d_currImg, nbytes); CUDA_CHECK;
cudaMemcpy(d_currImg, (void*)grayCur.data, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// Reference grayscale image
float *d_refimgIn;
cudaMalloc(&d_refimgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_refimgIn, (void*)grayRef.data, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
//Current depth image
float *d_depthImgIn;
cudaMalloc(&d_depthImgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_depthImgIn, (void*)depthCur.data,nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
//Reference depth image
float *d_refdepthImgIn;
cudaMalloc(&d_refdepthImgIn, nbytes); CUDA_CHECK;
cudaMemcpy(d_refdepthImgIn, (void*)depthRef.data,nbytes,cudaMemcpyHostToDevice); CUDA_CHECK;
//Residual Image
float *d_resImg;
cudaMalloc(&d_resImg, nbytes); CUDA_CHECK;
cudaMemset(d_resImg, 0, nbytes); CUDA_CHECK;
float *d_rot, *d_t;
float fx = kLevel(0,0);
float fy = kLevel(1,1);
float cx = kLevel(0,2);
float cy = kLevel(1,2);
int N = w*h;
float *JTJ = new float[(size_t)N*21];
float *JTB = new float[(size_t)N*6];
float *temp = new float[(size_t)N];
float *result = new float[(size_t)1];
float *A_interim = new float[(size_t)21];
float *b_interim = new float[(size_t)6];
Mat6f A = Mat6f::Zero();
Vec6f b = Vec6f::Zero();
float errLast = std::numeric_limits<float>::max();
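// Gauss-Newton iterations: each pass recomputes the photometric residual and its Jacobian on the GPU, reduces them into the 6x6 normal equations JtJ * delta = -Jtb, solves by LDLT, and updates xi.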
for(int i = 0; i < ITER ; i++)
{
float *rot_data = rot.data();
float *t_data = t.data();
float *d_vx, *d_vy, *d_jacobif,*JtJ_final, *Jtb_final, *d_temp, *d_result;
size_t n_d_vx = (size_t)w*h;
size_t n_d_vy = (size_t)w*h;
cudaMalloc(&d_jacobif, N*6*sizeof(float)); CUDA_CHECK;
cudaMemset(d_jacobif, 0, N*6*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_temp, N*sizeof(float)); CUDA_CHECK;
cudaMemset(d_temp, 0, N*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_result, N*sizeof(float)); CUDA_CHECK;
cudaMemset(d_result, 0, N*sizeof(float)); CUDA_CHECK;
cudaMalloc(&JtJ_final, N*21*sizeof(float)); CUDA_CHECK;
cudaMemset(JtJ_final, 0, N*21*sizeof(float)); CUDA_CHECK;
cudaMalloc(&Jtb_final, N*6*sizeof(float)); CUDA_CHECK;
cudaMemset(Jtb_final, 0, N*6*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_vx, n_d_vx*sizeof(float)); CUDA_CHECK;
cudaMemset(d_vx, 0, n_d_vx*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_vy, n_d_vy*sizeof(float)); CUDA_CHECK;
cudaMemset(d_vy, 0, n_d_vy*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_rot, 9*sizeof(float)); CUDA_CHECK;
cudaMemcpy(d_rot,rot_data,9*sizeof(float),cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMalloc(&d_t, 3*sizeof(float)); CUDA_CHECK;
cudaMemcpy(d_t,t_data,3*sizeof(float),cudaMemcpyHostToDevice); CUDA_CHECK;
dim3 block = dim3(32, 8, 1);
dim3 grid = dim3((w+block.x-1)/block.x, (h+block.y-1)/block.y, 1);
//Texture Memory
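// The current image and its gradients are bound to 2D textures so the warping kernels get clamped addressing and hardware bilinear interpolation.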
texRef.addressMode[0] = cudaAddressModeClamp;
texRef.addressMode[1] = cudaAddressModeClamp;
texRef.filterMode = cudaFilterModeLinear;
texRef.normalized = false;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texRef, d_currImg, &desc, w, h, w*sizeof(d_currImg[0]));
texGradX.addressMode[0] = cudaAddressModeClamp;
texGradX.addressMode[1] = cudaAddressModeClamp;
texGradX.filterMode = cudaFilterModeLinear;
texGradX.normalized = false;
cudaChannelFormatDesc descX = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texGradX, d_vx, &descX, w, h, w*sizeof(d_vx[0]));
texGradY.addressMode[0] = cudaAddressModeClamp;
texGradY.addressMode[1] = cudaAddressModeClamp;
texGradY.filterMode = cudaFilterModeLinear;
texGradY.normalized = false;
cudaChannelFormatDesc descY = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texGradY, d_vy, &descY, w, h, w*sizeof(d_vy[0]));
//cudaMemset(d_imgOut, 0, nbytes); CUDA_CHECK;
cudaMemset(d_resImg, 0, nbytes); CUDA_CHECK;
calcErr <<<grid,block>>> (d_refimgIn,d_currImg,d_refdepthImgIn,d_resImg,d_rot,d_t,fx,fy,cx,cy,w,h);
CUDA_CHECK;
gradCompute <<<grid,block>>> (d_currImg,d_vx,d_vy,w,h); CUDA_CHECK;
deriveNumeric <<<grid,block>>>(d_vx,d_vy,d_refdepthImgIn,d_resImg,d_jacobif,w,h,fx,fy,cx,cy,d_rot,d_t,JtJ_final,Jtb_final);
CUDA_CHECK;
cv::Mat residualGPU(h,w,grayRef.type());
cudaMemcpy((void *)residualGPU.data,d_resImg,N*sizeof(float),cudaMemcpyDeviceToHost); CUDA_CHECK;
Eigen::VectorXf residual(N);
int idx = 0;
for(int i =0 ;i<w;i++)
{
for(int j =0 ;j<h;j++)
{
residual[idx] = residualGPU.at<float>(j,i); // at(row, col): j indexes the h rows, i the w columns
idx++;
}
}
dim3 block1 = dim3(128, 1, 1);
dim3 grid1 = dim3((N + block1.x -1)/block1.x,1,1);
size_t smBytes = block1.x * block1.y * block1.z * sizeof(float);
//Reduction for JtJ
float* ptrJtJ = JtJ_final;
for(int j = 0; j<21; j++)
{
block_sum <<<grid1,block1,smBytes>>> (ptrJtJ,ptrJtJ,N);
for(int offset = block1.x / 2;offset > 0;offset >>= 1)
block_sum <<<grid1,block1,smBytes>>> (ptrJtJ,ptrJtJ,N);
cudaMemcpy(result,ptrJtJ,sizeof(float),cudaMemcpyDeviceToHost); CUDA_CHECK;
A_interim[j]= result[0];
ptrJtJ = ptrJtJ + N;
}
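// A_interim now holds the 21 reduced entries of the upper triangle of the symmetric 6x6 matrix JtJ; unpack them into A.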
int k = 0;
for(int i = 0; i<6; i++)
{
for(int j = i; j<6; j++)
{
A(i,j) = A(j,i) =A_interim[k];
k++;
}
}
//Reduction for Jtb
float *ptrJtb = Jtb_final;
for(int j = 0; j<6; j++)
{
block_sum <<<grid1,block1,smBytes>>> (ptrJtb,ptrJtb,N);
for(int offset = block1.x / 2;offset > 0;offset >>= 1)
block_sum <<<grid1,block1,smBytes>>> (ptrJtb,ptrJtb,N);
cudaMemcpy(result,ptrJtb,sizeof(float),cudaMemcpyDeviceToHost); CUDA_CHECK;
b_interim[j]= result[0];
ptrJtb = ptrJtb + N;
}
for(int i = 0; i<6; i++)
b(i) = b_interim[i];
// solve using Cholesky LDLT decomposition
Vec6f delta = -(A.ldlt().solve(b));
// update xi
xi = Sophus::SE3f::log(Sophus::SE3f::exp(delta)*Sophus::SE3f::exp(xi));
cudaUnbindTexture(texRef);
cudaUnbindTexture(texGradX);
cudaUnbindTexture(texGradY);
cudaFree(d_rot); CUDA_CHECK;
cudaFree(d_t); CUDA_CHECK;
cudaFree(d_vx); CUDA_CHECK;
cudaFree(d_vy); CUDA_CHECK;
cudaFree(d_jacobif); CUDA_CHECK;
cudaFree(d_result); CUDA_CHECK;
cudaFree(d_temp); CUDA_CHECK;
cudaFree(JtJ_final); CUDA_CHECK;
cudaFree(Jtb_final); CUDA_CHECK;
// print out final pose
convertSE3ToTf(xi, rot, t);
float error = (residual.cwiseProduct(residual)).mean();
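// Stop iterating once the mean squared residual improves by less than 0.5% relative to the previous iteration.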
if((error/errLast) > 0.995f)
break;
errLast = error;
}
delete[] JTJ;
delete[] JTB;
delete[] temp;
delete[] result;
delete[] A_interim;
delete[] b_interim;
cudaFree(d_currImg); CUDA_CHECK;
cudaFree(d_refimgIn); CUDA_CHECK;
cudaFree(d_depthImgIn); CUDA_CHECK;
cudaFree(d_refdepthImgIn); CUDA_CHECK;
cudaFree(d_resImg); CUDA_CHECK;
}
}
int main(int argc, char *argv[])
{
std::string dataFolder = std::string(STR(DVO_CUDA_SOURCE_DIR)) + "/data/";
// load file names
std::string assocFile = dataFolder + "rgbd_assoc.txt";
std::vector<std::string> filesColor;
std::vector<std::string> filesDepth;
std::vector<double> timestampsDepth;
std::vector<double> timestampsColor;
if (!loadAssoc(assocFile, filesDepth, filesColor, timestampsDepth, timestampsColor))
{
std::cout << "Assoc file could not be loaded!" << std::endl;
return 1;
}
// initialize cuda context
cudaDeviceSynchronize(); CUDA_CHECK;
int maxFrames = -1;
//maxFrames = 50;
// process frames
Eigen::Matrix4f absPose = Eigen::Matrix4f::Identity();
std::vector<Eigen::Matrix4f> poses;
std::vector<double> timestamps;
poses.push_back(absPose);
timestamps.push_back(timestampsDepth[0]);
cv::Mat colorPrev = loadColor(dataFolder + filesColor[0]);
cv::Mat grayPrev;
cv::cvtColor(colorPrev, grayPrev, CV_BGR2GRAY);
cv::Mat depthPrev = loadDepth(dataFolder + filesDepth[0]);
for (size_t i = 1; i < filesDepth.size() && (maxFrames < 0 || i < maxFrames); ++i)
{
// load input frame
std::string fileColor1 = filesColor[i];
std::string fileDepth1 = filesDepth[i];
double timeDepth1 = timestampsDepth[i];
cv::Mat color0 = colorPrev;
cv::Mat depth0 = depthPrev;
cv::Mat gray0 = grayPrev;
cv::Mat color1 = loadColor(dataFolder + fileColor1);
cv::Mat depth1 = loadDepth(dataFolder + fileDepth1);
cv::Mat gray1;
cv::cvtColor(color1, gray1, CV_BGR2GRAY);
cv::Mat grayRef = gray0;
cv::Mat depthRef = depth0;
cv::Mat grayCur = gray1;
cv::Mat depthCur = depth1;
// frame alignment
Vec6f xi = Vec6f::Zero();
Timer timer; timer.start();
align(depthRef, grayRef, depthCur, grayCur, xi);
timer.end(); float time = timer.get();
std::cout <<"Time : " << time*1000 << std::endl;
Eigen::Matrix3f rot;
Eigen::Vector3f t;
convertSE3ToTf(xi, rot, t);
std::cout << "pose (xi) between frames " << (i-1) << " and " << i << ": " << xi.transpose() << std::endl;
// concatenate poses
Eigen::Matrix4f relPose = Eigen::Matrix4f::Identity();
relPose.topLeftCorner(3,3) = rot;
relPose.topRightCorner(3,1) = t;
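// The relative transform is inverted before being accumulated into the absolute camera trajectory.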
absPose = absPose * relPose.inverse();
poses.push_back(absPose);
timestamps.push_back(timeDepth1);
#if 0
// save frames as point cloud
rot = absPose.topLeftCorner(3,3);
t = absPose.topRightCorner(3,1);
cv::Mat vertexMap1;
depthToVertexMap(K.cast<double>(), depth1, vertexMap1);
transformVertexMap(rot.cast<double>(), t.cast<double>(), vertexMap1);
cv::Mat color1UC;
color1.convertTo(color1UC, CV_8UC3, 255.0f);
std::stringstream ss;
ss << dataFolder << "cloud_" << std::setw(4) << std::setfill('0') << i << ".ply";
savePlyFile(ss.str(), color1UC, vertexMap1);
#endif
colorPrev = color1;
depthPrev = depth1;
grayPrev = gray1;
}
// save poses
savePoses(dataFolder + "traj.txt", poses, timestamps);
// clean up
cv::destroyAllWindows();
std::cout << "Direct Image Alignment finished." << std::endl;
return 0;
}
|
77843a63d98b5adebbf91260364383f400ea1f84.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <hip/hip_runtime.h>
#define N (1 << 12)
#define tile_size 32
#define block_size tile_size
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void fillArray(float *arr){
for(int i = 0; i < N*N; i++){
arr[i] = rand() % 100;
//arr[i] = i;
}
}
void seqMatrixMul(float *a1, float *a2, float *aout){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
aout[i*N+j]=0.0;
for(int k = 0; k < N; k++){
aout[N*i+j] += a1[N*i+k]*a2[N*k+j];
}
}
}
}
void wrongNumberCheck(float *a1, float *a2){
int bad = 0;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(a1[N*i + j] != a2[N*i + j]){
bad = bad + 1;
}
}
}
printf("Number of wrong multiplications = %i\n", bad);
}
int mulCheck(float *a1, float *a2){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(a1[N*i + j] != a2[N*i + j]){
printf("Matrix Multiplication Failed!\n");
printf("index = %i \n", N*i + j);
printf("expected = %f\nreceived = %f\n", a1[N*i + j], a2[N*i+j]);
printf("Next element...\n");
printf("expected = %f\nreceived = %f\n", a1[N*i + j+1], a2[N*i+j+1]);
printf("Checking for number of wrong multiplications...\n");
wrongNumberCheck(a1, a2);
return 1;
}
}
}
printf("Matrix Multiplication Successful!\n");
return 0;
}
int mulCheck2(float *a1, float *a2){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(abs(a1[N*i + j] - a2[N*i + j]) > 1e-8){
printf("Matrix Multiplication Failed!\n");
printf("index = %i \n", N*i + j);
printf("row = %i, col = %i\n", i, j);
printf("expected = %f\nreceived = %f\n", a1[N*i + j], a2[N*i+j]);
printf("Next element...\n");
printf("expected = %f\nreceived = %f\n", a1[N*i + j+1], a2[N*i+j+1]);
printf("Checking for number of wrong multiplications...\n");
wrongNumberCheck(a1, a2);
return 1;
}
}
}
printf("Matrix Multiplication Successful!\n");
return 0;
}
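// Tiled matrix multiplication: each block computes one tile_size x tile_size tile of the output, staging tiles of both inputs in shared memory; since N is a multiple of tile_size no boundary checks are needed.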
__global__ void gpuMatMul(float *a1, float *a2, float *aout){
__shared__ float A[tile_size][tile_size];
__shared__ float B[tile_size][tile_size];
int tc = threadIdx.x;
int tr = threadIdx.y;
int c = blockIdx.x*tile_size + threadIdx.x;
int r = blockIdx.y*tile_size + threadIdx.y;
float sum_val = 0;
for(int i = 0; i < N; i += tile_size){
A[tr][tc] = a1[N*r + i + tc];
B[tr][tc] = a2[c + N*(i + tr)];
__syncthreads();
for(int j = 0; j < tile_size; j++){
sum_val += A[tr][j]*B[j][tc];
}
__syncthreads();
}
aout[N*r + c] = sum_val;
}
int main(void){
// Setup time variables
float timecpu = 0;
float timegpu = 0;
float tpcpu = 0;
float tpgpu = 0;
hipEvent_t launch_begin_seq, launch_end_seq;
// Host variables
float *h_arr1 = (float*)malloc(N*N*sizeof(float));
float *h_arr2 = (float*)malloc(N*N*sizeof(float));
float *h_out = (float*)malloc(N*N*sizeof(float));
float *h_save = (float*)malloc(N*N*sizeof(float));
//Device variables
float *d_arr1, *d_arr2, *d_out;
hipMalloc((void**)&d_arr1, N*N*sizeof(float));
hipMalloc((void**)&d_arr2, N*N*sizeof(float));
hipMalloc((void**)&d_out, N*N*sizeof(float));
// Check Memory Allocation
if(h_arr1 == 0 || h_arr2 == 0 || h_out == 0 || h_save == 0 || d_arr1 == 0 || d_arr2 == 0 || d_out == 0){
printf("Memory Allocation Failed!\n");
return 1;
}
// Fill Array
fillArray(h_arr1);
fillArray(h_arr2);
memset(h_out, 0, N*N*sizeof(float));
memset(h_save, 0, N*N*sizeof(float));
// Create time variables
hipEventCreate(&launch_begin_seq);
hipEventCreate(&launch_end_seq);
//Start CPU Transpose
hipEventRecord(launch_begin_seq,0);
seqMatrixMul(h_arr1, h_arr2, h_save);
hipEventRecord(launch_end_seq,0);
hipEventSynchronize(launch_end_seq);
hipEventElapsedTime(&timecpu, launch_begin_seq, launch_end_seq);
printf("CPU time: %f ms\n", timecpu);
tpcpu = 1e-9*2.0*N*N*N/(timecpu*1e-3); // an NxN matrix multiply performs ~2*N^3 flops
printf("Throughput = %f Gflops/s\n\n", tpcpu);
// Prep Grid and Block variables
dim3 dimGrid(N/tile_size, N/tile_size, 1);
dim3 dimBlock(tile_size, block_size, 1);
// Prep device memory
hipMemset(d_arr1, 0, N*N*sizeof(float));
hipMemset(d_arr2, 0, N*N*sizeof(float));
hipMemcpy(d_arr1, h_arr1, N*N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_arr2, h_arr2, N*N*sizeof(float), hipMemcpyHostToDevice);
hipMemset(d_out, 0, N*N*sizeof(float));
// Create time variables
hipEventCreate(&launch_begin_seq);
hipEventCreate(&launch_end_seq);
// Start global GPU multiplication
hipEventRecord(launch_begin_seq,0);
hipLaunchKernelGGL(( gpuMatMul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_arr1, d_arr2, d_out);
hipEventRecord(launch_end_seq,0);
hipEventSynchronize(launch_end_seq);
// Copy Memory back to Host
hipMemcpy(h_out, d_out, N*N*sizeof(float), hipMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuMatMul");
if(mulCheck2(h_save, h_out) == 0){
hipEventElapsedTime(&timegpu, launch_begin_seq, launch_end_seq);
printf("GPU time: %f ms\n", timegpu);
tpgpu = 1e-9*2.0*N*N*N/(timegpu*1e-3); // an NxN matrix multiply performs ~2*N^3 flops
printf("Throughput = %f Gflops/s\n\n", tpgpu);
}
printf("Speed up = %f \n", timecpu/timegpu);
printf("ratio = %f \n\n", tpgpu/tpcpu);
printf("CSV output:\n");
printf("%i,%i,%i,%f,%f,%f,%f,%f,%f", N, tile_size, block_size, timecpu, timegpu, tpcpu, tpgpu, timecpu/timegpu, tpgpu/tpcpu);
free(h_arr1);
free(h_arr2);
free(h_out);
free(h_save);
hipFree(d_arr1);
hipFree(d_arr2);
hipFree(d_out);
return 0;
}
| 77843a63d98b5adebbf91260364383f400ea1f84.cu | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <cuda_runtime.h>
#define N (1 << 12)
#define tile_size 32
#define block_size tile_size
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void fillArray(float *arr){
for(int i = 0; i < N*N; i++){
arr[i] = rand() % 100;
//arr[i] = i;
}
}
void seqMatrixMul(float *a1, float *a2, float *aout){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
aout[i*N+j]=0.0;
for(int k = 0; k < N; k++){
aout[N*i+j] += a1[N*i+k]*a2[N*k+j];
}
}
}
}
void wrongNumberCheck(float *a1, float *a2){
int bad = 0;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(a1[N*i + j] != a2[N*i + j]){
bad = bad + 1;
}
}
}
printf("Number of wrong multiplications = %i\n", bad);
}
int mulCheck(float *a1, float *a2){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(a1[N*i + j] != a2[N*i + j]){
printf("Matrix Multiplication Failed!\n");
printf("index = %i \n", N*i + j);
printf("expected = %f\nreceived = %f\n", a1[N*i + j], a2[N*i+j]);
printf("Next element...\n");
printf("expected = %f\nreceived = %f\n", a1[N*i + j+1], a2[N*i+j+1]);
printf("Checking for number of wrong multiplications...\n");
wrongNumberCheck(a1, a2);
return 1;
}
}
}
printf("Matrix Multiplication Successful!\n");
return 0;
}
int mulCheck2(float *a1, float *a2){
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
if(abs(a1[N*i + j] - a2[N*i + j]) > 1e-8){
printf("Matrix Multiplication Failed!\n");
printf("index = %i \n", N*i + j);
printf("row = %i, col = %i\n", i, j);
printf("expected = %f\nreceived = %f\n", a1[N*i + j], a2[N*i+j]);
printf("Next element...\n");
printf("expected = %f\nreceived = %f\n", a1[N*i + j+1], a2[N*i+j+1]);
printf("Checking for number of wrong multiplications...\n");
wrongNumberCheck(a1, a2);
return 1;
}
}
}
printf("Matrix Multiplication Successful!\n");
return 0;
}
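// Tiled matrix multiplication: each block computes one tile_size x tile_size tile of the output, staging tiles of both inputs in shared memory; since N is a multiple of tile_size no boundary checks are needed.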
__global__ void gpuMatMul(float *a1, float *a2, float *aout){
__shared__ float A[tile_size][tile_size];
__shared__ float B[tile_size][tile_size];
int tc = threadIdx.x;
int tr = threadIdx.y;
int c = blockIdx.x*tile_size + threadIdx.x;
int r = blockIdx.y*tile_size + threadIdx.y;
float sum_val = 0;
for(int i = 0; i < N; i += tile_size){
A[tr][tc] = a1[N*r + i + tc];
B[tr][tc] = a2[c + N*(i + tr)];
__syncthreads();
for(int j = 0; j < tile_size; j++){
sum_val += A[tr][j]*B[j][tc];
}
__syncthreads();
}
aout[N*r + c] = sum_val;
}
int main(void){
// Setup time variables
float timecpu = 0;
float timegpu = 0;
float tpcpu = 0;
float tpgpu = 0;
cudaEvent_t launch_begin_seq, launch_end_seq;
// Host variables
float *h_arr1 = (float*)malloc(N*N*sizeof(float));
float *h_arr2 = (float*)malloc(N*N*sizeof(float));
float *h_out = (float*)malloc(N*N*sizeof(float));
float *h_save = (float*)malloc(N*N*sizeof(float));
//Device variables
float *d_arr1, *d_arr2, *d_out;
cudaMalloc((void**)&d_arr1, N*N*sizeof(float));
cudaMalloc((void**)&d_arr2, N*N*sizeof(float));
cudaMalloc((void**)&d_out, N*N*sizeof(float));
// Check Memory Allocation
if(h_arr1 == 0 || h_arr2 == 0 || h_out == 0 || h_save == 0 || d_arr1 == 0 || d_arr2 == 0 || d_out == 0){
printf("Memory Allocation Failed!\n");
return 1;
}
// Fill Array
fillArray(h_arr1);
fillArray(h_arr2);
memset(h_out, 0, N*N*sizeof(float));
memset(h_save, 0, N*N*sizeof(float));
// Create time variables
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
//Start CPU Transpose
cudaEventRecord(launch_begin_seq,0);
seqMatrixMul(h_arr1, h_arr2, h_save);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
cudaEventElapsedTime(&timecpu, launch_begin_seq, launch_end_seq);
printf("CPU time: %f ms\n", timecpu);
tpcpu = 1e-9*2.0*N*N*N/(timecpu*1e-3); // an NxN matrix multiply performs ~2*N^3 flops
printf("Throughput = %f Gflops/s\n\n", tpcpu);
// Prep Grid and Block variables
dim3 dimGrid(N/tile_size, N/tile_size, 1);
dim3 dimBlock(tile_size, block_size, 1);
// Prep device memory
cudaMemset(d_arr1, 0, N*N*sizeof(float));
cudaMemset(d_arr2, 0, N*N*sizeof(float));
cudaMemcpy(d_arr1, h_arr1, N*N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_arr2, h_arr2, N*N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(d_out, 0, N*N*sizeof(float));
// Create time variables
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
// Start global GPU multiplication
cudaEventRecord(launch_begin_seq,0);
gpuMatMul<<<dimGrid, dimBlock>>>(d_arr1, d_arr2, d_out);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
// Copy Memory back to Host
cudaMemcpy(h_out, d_out, N*N*sizeof(float), cudaMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuMatMul");
if(mulCheck2(h_save, h_out) == 0){
cudaEventElapsedTime(&timegpu, launch_begin_seq, launch_end_seq);
printf("GPU time: %f ms\n", timegpu);
tpgpu = 1e-9*2.0*N*N*N/(timegpu*1e-3); // an NxN matrix multiply performs ~2*N^3 flops
printf("Throughput = %f Gflops/s\n\n", tpgpu);
}
printf("Speed up = %f \n", timecpu/timegpu);
printf("ratio = %f \n\n", tpgpu/tpcpu);
printf("CSV output:\n");
printf("%i,%i,%i,%f,%f,%f,%f,%f,%f", N, tile_size, block_size, timecpu, timegpu, tpcpu, tpgpu, timecpu/timegpu, tpgpu/tpcpu);
free(h_arr1);
free(h_arr2);
free(h_out);
free(h_save);
cudaFree(d_arr1);
cudaFree(d_arr2);
cudaFree(d_out);
return 0;
}
|
466053f8add6799b3956dcd6d6fad32454bb4cc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
bools[index] = idata[index] != 0 ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (bools[index] == 1)
odata[indices[index]] = idata[index];
}
}
}
| 466053f8add6799b3956dcd6d6fad32454bb4cc8.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
bools[index] = idata[index] != 0 ? 1 : 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) return;
if (bools[index] == 1)
odata[indices[index]] = idata[index];
}
}
}
|
8d3da6d9e043c966ff6d64529d5ed1a6beb18ec1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hipcub/hipcub.hpp>
#include <memory>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/operators/layer_norm_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType;
inline static int GetDesiredBlockDim(int block_dim) {
const int kMaxBlockDim = 512;
return block_dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << (static_cast<int>(std::log2f(block_dim))));
}
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__)
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \
log2_block_dim, feature_size, kMaxBlockNum, ...) \
case (1 << (log2_block_dim)): { \
for (int i = 0; i < ::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \
int col_offset = i * kMaxBlockNum; \
int block_num = ::min(feature_size - col_offset, kMaxBlockNum); \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} \
} break
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \
##__VA_ARGS__)
static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T &first, const T &second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T>
__inline__ __device__ T rsqrt_(const T val) {
return static_cast<T>(1) / sqrt(val);
}
template <>
__inline__ __device__ float rsqrt_(const float val) {
return rsqrtf(val);
}
template <>
__inline__ __device__ double rsqrt_(const double val) {
return rsqrt(val);
}
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
template <>
__inline__ __device__ half rsqrt_(const half val) {
return hrsqrt(val);
}
#endif
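// Forward layer norm: one block per row; a block reduction accumulates the sum and sum of squares to obtain mean and variance, then each thread normalizes its elements and applies the optional scale and bias.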
template <typename T, typename U, int BlockDim>
__global__ void LayerNormForward(const T *x, const U *scale, const U *bias,
T *y, U *mean, U *var, float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U mean_share;
__shared__ U var_share;
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
// Step 1: Reduce to calculate mean and var
U mean_val = 0;
U var_val = 0;
for (int i = beg_idx; i < end_idx; i += BlockDim) {
U tmp = static_cast<U>(x[i]);
mean_val += tmp;
var_val += (tmp * tmp);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(mean_val, var_val),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
auto tmp = pair.first_ / feature_size;
mean[blockIdx.x] = mean_share = static_cast<U>(tmp);
var[blockIdx.x] = var_share =
static_cast<U>(pair.second_ / feature_size - tmp * tmp);
}
__syncthreads();
mean_val = mean_share;
U invvar = rsqrt_<U>(var_share + static_cast<U>(epsilon));
// Step 2: Calculate y
if (scale != nullptr) {
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(
scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar + bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) *
invvar);
}
}
} else { // scale == nullptr
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar +
bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar);
}
}
}
}
template <typename T, typename U, int VPT>
__inline__ __device__ void cuLoadAddStridedInputs(
const int i1_block, const int thr_load_row_off, const int thr_load_col_off,
const int i2_off, const int row_stride, U *warp_buf1, U *warp_buf2,
const T *input, const T *dout, const int i1_end, const int n2,
const U *__restrict__ mean, const U *__restrict__ var,
const float epsilon) {
const int i1 = i1_block + thr_load_row_off;
if (i1 >= i1_end) return;
U curr_mean = mean[i1];
U curr_invvar = rsqrt_<U>(var[i1] + epsilon);
for (int k = 0; k < VPT; ++k) {
const int i2 = i2_off + k;
const int load_idx = i1 * n2 + i2;
const int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k;
if (i2 < n2) {
U curr_input = static_cast<U>(input[load_idx]);
U curr_dout = static_cast<U>(dout[load_idx]);
warp_buf1[write_idx] += curr_dout;
warp_buf2[write_idx] +=
curr_dout * (curr_input - curr_mean) * curr_invvar;
}
}
}
template <typename T, typename U, int BDIMX, int BDIMY, int VPTX>
__global__ void LayerNormBackwardPartGradGammaBeta(
const T *__restrict__ dout, const T *__restrict__ input, const int n1,
const int n2, const U *__restrict__ mean, const U *__restrict__ var,
float epsilon, U *part_grad_gamma, U *part_grad_beta) {
// VPTX -> value per thread.x, BDIMX -> blockDim.x, BDIMY -> blockDim.y
// template for compile time optimizations
constexpr int row_stride = BDIMX + 1;
const int thr_load_col_off = (threadIdx.x * VPTX) & (BDIMX - 1);
const int thr_load_row_off =
(threadIdx.x * VPTX) / BDIMX + threadIdx.y * BDIMY;
const int i2_off = blockIdx.x * BDIMX + thr_load_col_off;
constexpr int shared_cap = (BDIMX * BDIMY > 2 * VPTX * BDIMY * row_stride)
? BDIMX * BDIMY
: 2 * VPTX * BDIMY * row_stride;
__shared__ U buf[shared_cap];
U *warp_buf1 = reinterpret_cast<U *>(buf);
U *warp_buf2 = warp_buf1 + VPTX * BDIMY * row_stride;
for (int idx = threadIdx.y * blockDim.x + threadIdx.x;
idx < 2 * VPTX * BDIMY * row_stride; idx += BDIMX * BDIMY) {
buf[idx] = U(0);
}
__syncthreads();
for (int i1_block = blockIdx.y * BDIMY * VPTX; i1_block < n1;
i1_block += VPTX * BDIMY * gridDim.y) {
cuLoadAddStridedInputs<T, U, VPTX>(
i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride,
warp_buf1, warp_buf2, input, dout, n1, n2, mean, var, epsilon);
}
__syncthreads();
// inter-warp reductions
// sum within each warp
U acc1 = U(0);
U acc2 = U(0);
for (int k = 0; k < VPTX; ++k) {
int row1 = threadIdx.y + k * VPTX;
int idx1 = row1 * row_stride + threadIdx.x;
acc1 += warp_buf1[idx1];
acc2 += warp_buf2[idx1];
}
warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1;
warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2;
__syncthreads();
// sum all warps
for (int offset = VPTX >> 1; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + offset;
int idx1 = row1 * row_stride + threadIdx.x;
int idx2 = row2 * row_stride + threadIdx.x;
warp_buf1[idx1] += warp_buf1[idx2];
warp_buf2[idx1] += warp_buf2[idx2];
}
__syncthreads();
}
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.y == 0 && i2 < n2) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + 1;
int idx1 = row1 * row_stride + threadIdx.x;
int idx2 = row2 * row_stride + threadIdx.x;
part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2];
part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2];
}
}
template <typename T, typename U, int BDIMX, int BDIMY>
__global__ void LayerNormBackwardSumGradGammaBeta(
const U *part_grad_gamma, const U *part_grad_beta, const int part_size,
// const int n1, const int n2, T* grad_gamma, T* grad_beta) {
const int n1, const int n2, U *grad_gamma, U *grad_beta) {
// sum partial gradients for gamma and beta
__shared__ U buf[BDIMX * BDIMY];
int i2 = blockIdx.x * BDIMX + threadIdx.x;
if (i2 < n2) {
// each warp does sequential reductions until reduced part_size is num_warps
int num_warp_reductions = part_size / BDIMY;
U sum_gamma = U(0);
U sum_beta = U(0);
const U *part_grad_gamma_ptr =
part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2;
const U *part_grad_beta_ptr =
part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2;
for (int warp_offset = 0; warp_offset < num_warp_reductions;
++warp_offset) {
sum_gamma += part_grad_gamma_ptr[warp_offset * n2];
sum_beta += part_grad_beta_ptr[warp_offset * n2];
}
// inter-warp reductions
constexpr int nbsize3 = BDIMX * BDIMY / 2;
for (int offset = BDIMY / 2; offset >= 1; offset /= 2) {
// top half write to shared memory
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
buf[write_idx] = sum_gamma;
buf[write_idx + nbsize3] = sum_beta;
}
__syncthreads();
// bottom half sums
if (threadIdx.y < offset) {
const int read_idx = threadIdx.y * BDIMX + threadIdx.x;
sum_gamma += buf[read_idx];
sum_beta += buf[read_idx + nbsize3];
}
__syncthreads();
}
// write out fully summed gradients
if (threadIdx.y == 0) {
grad_gamma[i2] = sum_gamma;
grad_beta[i2] = sum_beta;
}
}
}
template <typename T, typename U, int BDIMX, int BDIMY>
__global__ void LayerNormBackwardComputeGradInput(
const T *__restrict__ dout, const T *__restrict__ input, const int n1,
const int n2,
// const U* __restrict__ mean, const U* __restrict__ var, const float
// epsilon, const T* gamma,
const U *__restrict__ mean, const U *__restrict__ var, const float epsilon,
const U *gamma, T *grad_input) {
{
#ifdef __HIPCC__
auto i1 = hipBlockIdx_x;
#else
auto i1 = blockIdx.x;
#endif
U sum_loss1 = U(0);
U sum_loss2 = U(0);
const U c_mean = mean[i1];
const U c_invvar = rsqrt_<U>(var[i1] + epsilon);
const T *k_input = input + i1 * n2;
const T *k_dout = dout + i1 * n2;
constexpr int numx = BDIMX * BDIMY;
const int thrx = threadIdx.x + threadIdx.y * BDIMX;
if (gamma != NULL) {
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l + k]);
const U c_loss = static_cast<U>(k_dout[l + k]);
sum_loss1 += c_loss * gamma[l + k];
sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss * gamma[l];
sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar;
}
} else {
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l + k]);
const U c_loss = static_cast<U>(k_dout[l + k]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
// intra-warp reductions
for (int mask = BDIMX / 2; mask > 0; mask /= 2) {
sum_loss1 +=
__shfl_xor_sync(0xffffffff, sum_loss1, mask,
warpSize); // WARP_SHFL_XOR(sum_loss1, mask);
sum_loss2 +=
__shfl_xor_sync(0xffffffff, sum_loss2, mask,
warpSize); // WARP_SHFL_XOR(sum_loss2, mask);
}
// inter-warp reductions
if (BDIMY > 1) {
__shared__ U buf[BDIMX * BDIMY];
for (int offset = BDIMY / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_i = (threadIdx.y - offset) * BDIMX + threadIdx.x;
buf[2 * wrt_i] = sum_loss1;
buf[2 * wrt_i + 1] = sum_loss2;
}
__syncthreads();
// lower half merges
if (threadIdx.y < offset) {
const int read_i = threadIdx.y * blockDim.x + threadIdx.x;
sum_loss1 += buf[2 * read_i];
sum_loss2 += buf[2 * read_i + 1];
}
__syncthreads();
}
if (threadIdx.y == 0) {
buf[2 * threadIdx.x] = sum_loss1;
buf[2 * threadIdx.x + 1] = sum_loss2;
}
__syncthreads();
if (threadIdx.y != 0) {
sum_loss1 = buf[2 * threadIdx.x];
sum_loss2 = buf[2 * threadIdx.x + 1];
}
}
// all threads now have the two sums over l
U fH = (U)n2;
U term1 = (U(1) / fH) * c_invvar;
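// Per-element input gradient: d_x = (invvar / H) * (H * dy * gamma - sum_loss1 - (x - mean) * invvar * sum_loss2); the branch without gamma below is the same with gamma == 1.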
T *k_grad_input = grad_input + i1 * n2;
if (gamma != NULL) {
for (int l = thrx; l < n2; l += numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss * gamma[l];
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
} else {
for (int l = thrx; l < n2; l += numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss;
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
}
}
}
// Make sure that d_scale != nullptr && d_bias != nullptr
// Since d_scale != nullptr, scale would not be nullptr
template <typename T, typename U, int BlockDim, bool HasDx>
__global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y,
U *d_scale, U *d_bias, T *d_x,
const U *mean, const U *var,
const U *scale, float epsilon,
int batch_size, int feature_size,
int col_offset) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset);
int end_idx = batch_size * feature_size + (blockIdx.x + col_offset);
int stride = BlockDim * feature_size;
U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon);
d_scale_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) / var_val;
d_bias_partial += static_cast<U>(d_y[i]);
if (HasDx) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
}
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_scale[blockIdx.x + col_offset] = pair.first_;
d_bias[blockIdx.x + col_offset] = pair.second_;
}
}
// Make sure that there is only one true expression: d_scale != nullptr
// or d_bias != nullptr
// Notice: scale may be nullptr
template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale>
__global__ void LayerNormBackwardGradientScaleOrBias(
const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean,
const U *var, const U *scale, float epsilon, int batch_size,
int feature_size, int col_offset) {
using BlockReduce = hipcub::BlockReduce<U, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset;
int end_idx = batch_size * feature_size + blockIdx.x + col_offset;
int stride = BlockDim * feature_size;
U d_scale_or_d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon));
if (HasDScale) {
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) /
var_val;
} else { // d_bias != nullptr
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]);
}
if (HasDx) {
if (scale != nullptr) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
}
}
d_scale_or_d_bias_partial =
BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, hipcub::Sum());
if (threadIdx.x == 0) {
if (HasDScale) {
d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
} else {
d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
}
}
}
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x,
const U *mean,
const U *var,
float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x];
U block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
// Here, we only calculate d_x
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y,
T *d_x, const U *mean,
const U *var, const U *scale,
float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon));
if (scale != nullptr) {
int col_idx = i % feature_size;
d_x[i] =
static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
template <typename T, typename U>
__global__ void LayerNormBackwardWhenBatchSizeIsOne(
const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean,
const U *var, const U *scale, float epsilon, int feature_size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < feature_size) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon));
if (d_x != nullptr) {
if (d_scale == nullptr) {
d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val);
} else {
d_x[idx] =
static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val);
}
}
if (d_scale != nullptr) {
d_scale[idx] = static_cast<U>(d_y[idx]) *
(static_cast<U>(x[idx]) - mean[idx]) / var_val;
}
if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]);
}
}
template <typename T, typename U>
static void LayerNormBackward(const T *x, const T *d_y, const U *scale,
const U *mean, const U *var, T *d_x, U *d_scale,
U *d_bias, float epsilon, int batch_size,
int feature_size,
const framework::ExecutionContext &ctx) {
auto &dev_ctx = ctx.cuda_device_context();
auto stream = dev_ctx.stream();
const int kMaxBlockDim = 512;
const int kMaxBlockNum = 128;
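// Encode which gradients are requested as a 3-bit mask: bit 2 = d_x, bit 1 = d_scale, bit 0 = d_bias; the switch below dispatches on this mask.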
int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) |
((d_scale != nullptr ? 1 : 0) << 1) |
((d_bias != nullptr ? 1 : 0));
if (gradient_flag == 0) return;
if (batch_size == 1) {
hipLaunchKernelGGL(( LayerNormBackwardWhenBatchSizeIsOne<
T, U>), dim3((feature_size + kMaxBlockDim - 1) / kMaxBlockDim), dim3(kMaxBlockDim),
0, stream, x, d_y, d_x, d_scale, d_bias, mean, var, scale,
epsilon, feature_size);
if (d_x != nullptr) {
switch (GetDesiredBlockDim(feature_size)) {
hipLaunchKernelGGL(( FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(1), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
}
return;
}
auto block_dim = GetDesiredBlockDim(batch_size);
switch (gradient_flag) {
case 1: // d_x == nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
true>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 3: // d_x == nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientAll<
T, U, kBlockDim, false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardGradientOnlyDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_y, d_x, mean, var, scale, epsilon, feature_size));
}
break;
case 5: // d_x != nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
true>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr
{
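// Full path (all three gradients): first accumulate partial gamma/beta gradients over blocks of rows, then reduce them across blocks, and finally compute d_x with a dedicated per-row kernel.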
constexpr int VPT = 4;
constexpr int BDIMX2 = 32;
constexpr int BDIMY2 = 4;
dim3 threads2(BDIMX2, BDIMY2, 1);
constexpr int part_size = BDIMY2 * VPT;
const dim3 blocks2((feature_size + BDIMX2 - 1) / BDIMX2, part_size, 1);
auto part_grad_gamma_ptr =
memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U));
auto part_grad_beta_ptr =
memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U));
U *part_grad_gamma = reinterpret_cast<U *>(part_grad_gamma_ptr->ptr());
U *part_grad_beta = reinterpret_cast<U *>(part_grad_beta_ptr->ptr());
hipLaunchKernelGGL(( LayerNormBackwardPartGradGammaBeta<T, U, BDIMX2, BDIMY2,
VPT>), dim3(blocks2), dim3(threads2), 0, stream,
d_y, x, batch_size, feature_size, mean, var, epsilon, part_grad_gamma,
part_grad_beta); // compute part_grad_gamma, beta
constexpr int BDIMX3 = 32;
constexpr int BDIMY3 = 8;
dim3 threads3(BDIMX3, BDIMY3, 1);
const dim3 blocks3((feature_size + BDIMX2 - 1) / BDIMX2, 1, 1);
hipLaunchKernelGGL(( LayerNormBackwardSumGradGammaBeta<
T, U, BDIMX3, BDIMY3>), dim3(blocks3), dim3(threads3), 0, stream,
part_grad_gamma, part_grad_beta, part_size, batch_size, feature_size,
d_scale, d_bias);
constexpr int BDIMX1 = 32;
constexpr int BDIMY1 = 4;
dim3 threads1(BDIMX1, BDIMY1, 1);
hipLaunchKernelGGL(( LayerNormBackwardComputeGradInput<
T, U, BDIMX1, BDIMY1>), dim3(batch_size), dim3(threads1), 0, stream,
d_y, x, batch_size, feature_size, mean, var, epsilon, scale, d_x);
break;
}
default:
break;
}
}
template <typename T>
void LayerNormDirectCUDAFunctor<T>::operator()(hipStream_t stream,
const T *input,
std::vector<int> input_shape,
const T *bias, const T *scale,
T *output, T *mean, T *variance,
int begin_norm_axis, float eps) {
const auto x_dims = framework::make_ddim(input_shape);
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormForward<T, T, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
template <typename T>
class LayerNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
auto *scale = ctx.Input<Tensor>("Scale");
auto *bias = ctx.Input<Tensor>("Bias");
auto *x = ctx.Input<Tensor>("X");
auto *y = ctx.Output<Tensor>("Y");
auto *mean = ctx.Output<Tensor>("Mean");
auto *var = ctx.Output<Tensor>("Variance");
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
const auto x_dims = x->dims();
auto *x_data = x->data<T>();
auto *y_data = y->mutable_data<T>(ctx.GetPlace());
auto *mean_data = mean->mutable_data<U>(ctx.GetPlace());
auto *var_data = var->mutable_data<U>(ctx.GetPlace());
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *bias_data = (bias == nullptr ? nullptr : bias->data<U>());
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormForward<T, U,
kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x_data, scale_data, bias_data, y_data, mean_data, var_data,
epsilon, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end must be larger than 1"));
break;
}
}
};
template <typename T>
class LayerNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
// d_x, d_scale, d_bias may be nullptr
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *x = ctx.Input<Tensor>("X");
auto *mean = ctx.Input<Tensor>("Mean");
auto *var = ctx.Input<Tensor>("Variance");
auto *scale = ctx.Input<Tensor>("Scale");
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *x_data = x->data<T>();
auto *d_y_data = d_y->data<T>();
auto *mean_data = mean->data<U>();
auto *var_data = var->data<U>();
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *d_scale_data =
(d_scale == nullptr ? nullptr
: d_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_bias_data =
(d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace()));
auto *d_x_data =
(d_x == nullptr ? nullptr : d_x->mutable_data<T>(ctx.GetPlace()));
const auto &x_dims = x->dims();
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data,
d_x_data, d_scale_data, d_bias_data, epsilon,
batch_size, feature_size, ctx);
}
};
template class LayerNormDirectCUDAFunctor<float>;
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE
#undef FIXED_BLOCK_DIM_CASE_BASE
#undef FIXED_BLOCK_DIM_CASE
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
layer_norm,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
layer_norm_grad,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext,
plat::float16>);
| 8d3da6d9e043c966ff6d64529d5ed1a6beb18ec1.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cub/cub.cuh>
#include <memory>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/operators/layer_norm_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType;
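// GetDesiredBlockDim rounds the requested size down to the nearest power of two
// and caps the result at 512 threads per block.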
inline static int GetDesiredBlockDim(int block_dim) {
const int kMaxBlockDim = 512;
return block_dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << (static_cast<int>(std::log2f(block_dim))));
}
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__)
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \
log2_block_dim, feature_size, kMaxBlockNum, ...) \
case (1 << (log2_block_dim)): { \
for (int i = 0; i < std::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \
int col_offset = i * kMaxBlockNum; \
int block_num = std::min(feature_size - col_offset, kMaxBlockNum); \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} \
} break
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \
##__VA_ARGS__)
static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T &first, const T &second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T>
__inline__ __device__ T rsqrt_(const T val) {
return static_cast<T>(1) / sqrt(val);
}
template <>
__inline__ __device__ float rsqrt_(const float val) {
return rsqrtf(val);
}
template <>
__inline__ __device__ double rsqrt_(const double val) {
return rsqrt(val);
}
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
template <>
__inline__ __device__ half rsqrt_(const half val) {
return hrsqrt(val);
}
#endif
template <typename T, typename U, int BlockDim>
__global__ void LayerNormForward(const T *x, const U *scale, const U *bias,
T *y, U *mean, U *var, float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U mean_share;
__shared__ U var_share;
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
// Step 1: Reduce to calculate mean and var
U mean_val = 0;
U var_val = 0;
for (int i = beg_idx; i < end_idx; i += BlockDim) {
U tmp = static_cast<U>(x[i]);
mean_val += tmp;
var_val += (tmp * tmp);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(mean_val, var_val),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
auto tmp = pair.first_ / feature_size;
mean[blockIdx.x] = mean_share = static_cast<U>(tmp);
var[blockIdx.x] = var_share =
static_cast<U>(pair.second_ / feature_size - tmp * tmp);
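    // i.e. Var(x) = E[x^2] - (E[x])^2, formed from the two block-reduced sums above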
}
__syncthreads();
mean_val = mean_share;
U invvar = rsqrt_<U>(var_share + static_cast<U>(epsilon));
// Step 2: Calculate y
if (scale != nullptr) {
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(
scale[j] * (static_cast<U>(x[i]) - mean_val) * invvar + bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) *
invvar);
}
}
} else { // scale == nullptr
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar +
bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) * invvar);
}
}
}
}
template <typename T, typename U, int VPT>
__inline__ __device__ void cuLoadAddStridedInputs(
const int i1_block, const int thr_load_row_off, const int thr_load_col_off,
const int i2_off, const int row_stride, U *warp_buf1, U *warp_buf2,
const T *input, const T *dout, const int i1_end, const int n2,
const U *__restrict__ mean, const U *__restrict__ var,
const float epsilon) {
const int i1 = i1_block + thr_load_row_off;
if (i1 >= i1_end) return;
U curr_mean = mean[i1];
U curr_invvar = rsqrt_<U>(var[i1] + epsilon);
for (int k = 0; k < VPT; ++k) {
const int i2 = i2_off + k;
const int load_idx = i1 * n2 + i2;
const int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k;
if (i2 < n2) {
U curr_input = static_cast<U>(input[load_idx]);
U curr_dout = static_cast<U>(dout[load_idx]);
warp_buf1[write_idx] += curr_dout;
warp_buf2[write_idx] +=
curr_dout * (curr_input - curr_mean) * curr_invvar;
}
}
}
template <typename T, typename U, int BDIMX, int BDIMY, int VPTX>
__global__ void LayerNormBackwardPartGradGammaBeta(
const T *__restrict__ dout, const T *__restrict__ input, const int n1,
const int n2, const U *__restrict__ mean, const U *__restrict__ var,
float epsilon, U *part_grad_gamma, U *part_grad_beta) {
  // VPTX -> value per thread.x, BDIMX -> blockDim.x, BDIMY -> blockDim.y
// template for compile time optimizations
constexpr int row_stride = BDIMX + 1;
const int thr_load_col_off = (threadIdx.x * VPTX) & (BDIMX - 1);
const int thr_load_row_off =
(threadIdx.x * VPTX) / BDIMX + threadIdx.y * BDIMY;
const int i2_off = blockIdx.x * BDIMX + thr_load_col_off;
constexpr int shared_cap = (BDIMX * BDIMY > 2 * VPTX * BDIMY * row_stride)
? BDIMX * BDIMY
: 2 * VPTX * BDIMY * row_stride;
__shared__ U buf[shared_cap];
U *warp_buf1 = reinterpret_cast<U *>(buf);
U *warp_buf2 = warp_buf1 + VPTX * BDIMY * row_stride;
for (int idx = threadIdx.y * blockDim.x + threadIdx.x;
idx < 2 * VPTX * BDIMY * row_stride; idx += BDIMX * BDIMY) {
buf[idx] = U(0);
}
__syncthreads();
for (int i1_block = blockIdx.y * BDIMY * VPTX; i1_block < n1;
i1_block += VPTX * BDIMY * gridDim.y) {
cuLoadAddStridedInputs<T, U, VPTX>(
i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride,
warp_buf1, warp_buf2, input, dout, n1, n2, mean, var, epsilon);
}
__syncthreads();
// inter-warp reductions
// sum within each warp
U acc1 = U(0);
U acc2 = U(0);
for (int k = 0; k < VPTX; ++k) {
int row1 = threadIdx.y + k * VPTX;
int idx1 = row1 * row_stride + threadIdx.x;
acc1 += warp_buf1[idx1];
acc2 += warp_buf2[idx1];
}
warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1;
warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2;
__syncthreads();
// sum all warps
for (int offset = VPTX >> 1; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + offset;
int idx1 = row1 * row_stride + threadIdx.x;
int idx2 = row2 * row_stride + threadIdx.x;
warp_buf1[idx1] += warp_buf1[idx2];
warp_buf2[idx1] += warp_buf2[idx2];
}
__syncthreads();
}
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.y == 0 && i2 < n2) {
int row1 = threadIdx.y;
int row2 = threadIdx.y + 1;
int idx1 = row1 * row_stride + threadIdx.x;
int idx2 = row2 * row_stride + threadIdx.x;
part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2];
part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2];
}
}
template <typename T, typename U, int BDIMX, int BDIMY>
__global__ void LayerNormBackwardSumGradGammaBeta(
const U *part_grad_gamma, const U *part_grad_beta, const int part_size,
// const int n1, const int n2, T* grad_gamma, T* grad_beta) {
const int n1, const int n2, U *grad_gamma, U *grad_beta) {
// sum partial gradients for gamma and beta
__shared__ U buf[BDIMX * BDIMY];
int i2 = blockIdx.x * BDIMX + threadIdx.x;
if (i2 < n2) {
// each warp does sequential reductions until reduced part_size is num_warps
int num_warp_reductions = part_size / BDIMY;
U sum_gamma = U(0);
U sum_beta = U(0);
const U *part_grad_gamma_ptr =
part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2;
const U *part_grad_beta_ptr =
part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2;
for (int warp_offset = 0; warp_offset < num_warp_reductions;
++warp_offset) {
sum_gamma += part_grad_gamma_ptr[warp_offset * n2];
sum_beta += part_grad_beta_ptr[warp_offset * n2];
}
// inter-warp reductions
constexpr int nbsize3 = BDIMX * BDIMY / 2;
for (int offset = BDIMY / 2; offset >= 1; offset /= 2) {
// top half write to shared memory
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
buf[write_idx] = sum_gamma;
buf[write_idx + nbsize3] = sum_beta;
}
__syncthreads();
// bottom half sums
if (threadIdx.y < offset) {
const int read_idx = threadIdx.y * BDIMX + threadIdx.x;
sum_gamma += buf[read_idx];
sum_beta += buf[read_idx + nbsize3];
}
__syncthreads();
}
// write out fully summed gradients
if (threadIdx.y == 0) {
grad_gamma[i2] = sum_gamma;
grad_beta[i2] = sum_beta;
}
}
}
template <typename T, typename U, int BDIMX, int BDIMY>
__global__ void LayerNormBackwardComputeGradInput(
const T *__restrict__ dout, const T *__restrict__ input, const int n1,
const int n2,
// const U* __restrict__ mean, const U* __restrict__ var, const float
// epsilon, const T* gamma,
const U *__restrict__ mean, const U *__restrict__ var, const float epsilon,
const U *gamma, T *grad_input) {
{
#ifdef __HIPCC__
auto i1 = hipBlockIdx_x;
#else
auto i1 = blockIdx.x;
#endif
U sum_loss1 = U(0);
U sum_loss2 = U(0);
const U c_mean = mean[i1];
const U c_invvar = rsqrt_<U>(var[i1] + epsilon);
const T *k_input = input + i1 * n2;
const T *k_dout = dout + i1 * n2;
constexpr int numx = BDIMX * BDIMY;
const int thrx = threadIdx.x + threadIdx.y * BDIMX;
if (gamma != NULL) {
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l + k]);
const U c_loss = static_cast<U>(k_dout[l + k]);
sum_loss1 += c_loss * gamma[l + k];
sum_loss2 += c_loss * gamma[l + k] * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss * gamma[l];
sum_loss2 += c_loss * gamma[l] * (c_h - c_mean) * c_invvar;
}
} else {
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
const U c_h = static_cast<U>(k_input[l + k]);
const U c_loss = static_cast<U>(k_dout[l + k]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
for (; l < n2; ++l) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
sum_loss1 += c_loss;
sum_loss2 += c_loss * (c_h - c_mean) * c_invvar;
}
}
// intra-warp reductions
for (int mask = BDIMX / 2; mask > 0; mask /= 2) {
sum_loss1 +=
__shfl_xor_sync(0xffffffff, sum_loss1, mask,
warpSize); // WARP_SHFL_XOR(sum_loss1, mask);
sum_loss2 +=
__shfl_xor_sync(0xffffffff, sum_loss2, mask,
warpSize); // WARP_SHFL_XOR(sum_loss2, mask);
}
// inter-warp reductions
if (BDIMY > 1) {
__shared__ U buf[BDIMX * BDIMY];
for (int offset = BDIMY / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_i = (threadIdx.y - offset) * BDIMX + threadIdx.x;
buf[2 * wrt_i] = sum_loss1;
buf[2 * wrt_i + 1] = sum_loss2;
}
__syncthreads();
// lower half merges
if (threadIdx.y < offset) {
const int read_i = threadIdx.y * blockDim.x + threadIdx.x;
sum_loss1 += buf[2 * read_i];
sum_loss2 += buf[2 * read_i + 1];
}
__syncthreads();
}
if (threadIdx.y == 0) {
buf[2 * threadIdx.x] = sum_loss1;
buf[2 * threadIdx.x + 1] = sum_loss2;
}
__syncthreads();
if (threadIdx.y != 0) {
sum_loss1 = buf[2 * threadIdx.x];
sum_loss2 = buf[2 * threadIdx.x + 1];
}
}
// all threads now have the two sums over l
U fH = (U)n2;
U term1 = (U(1) / fH) * c_invvar;
T *k_grad_input = grad_input + i1 * n2;
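  // The loops below apply
  //   dx = (1/H) * invvar * (H * dy * gamma - sum(dy * gamma)
  //                          - (x - mean) * invvar * sum(dy * gamma * (x - mean) * invvar))
  // with gamma treated as 1 when it is not provided.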
if (gamma != NULL) {
for (int l = thrx; l < n2; l += numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss * gamma[l];
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
} else {
for (int l = thrx; l < n2; l += numx) {
const U c_h = static_cast<U>(k_input[l]);
const U c_loss = static_cast<U>(k_dout[l]);
U f_grad_input = fH * c_loss;
f_grad_input -= sum_loss1;
f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2;
f_grad_input *= term1;
k_grad_input[l] = static_cast<T>(f_grad_input);
}
}
}
}
// Make sure that d_scale != nullptr && d_bias != nullptr
// Since d_scale != nullptr, scale would not be nullptr
template <typename T, typename U, int BlockDim, bool HasDx>
__global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y,
U *d_scale, U *d_bias, T *d_x,
const U *mean, const U *var,
const U *scale, float epsilon,
int batch_size, int feature_size,
int col_offset) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset);
int end_idx = batch_size * feature_size + (blockIdx.x + col_offset);
int stride = BlockDim * feature_size;
U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon);
d_scale_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) / var_val;
d_bias_partial += static_cast<U>(d_y[i]);
if (HasDx) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
}
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_scale[blockIdx.x + col_offset] = pair.first_;
d_bias[blockIdx.x + col_offset] = pair.second_;
}
}
// Make sure that there is only one true expression: d_scale != nullptr
// or d_bias != nullptr
// Notice: scale may be nullptr
template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale>
__global__ void LayerNormBackwardGradientScaleOrBias(
const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean,
const U *var, const U *scale, float epsilon, int batch_size,
int feature_size, int col_offset) {
using BlockReduce = cub::BlockReduce<U, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset;
int end_idx = batch_size * feature_size + blockIdx.x + col_offset;
int stride = BlockDim * feature_size;
U d_scale_or_d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon));
if (HasDScale) {
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) /
var_val;
} else { // d_bias != nullptr
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]);
}
if (HasDx) {
if (scale != nullptr) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
}
}
d_scale_or_d_bias_partial =
BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, cub::Sum());
if (threadIdx.x == 0) {
if (HasDScale) {
d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
} else {
d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
}
}
}
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x,
const U *mean,
const U *var,
float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x];
U block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
// Here, we only calculate d_x
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y,
T *d_x, const U *mean,
const U *var, const U *scale,
float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon));
if (scale != nullptr) {
int col_idx = i % feature_size;
d_x[i] =
static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
template <typename T, typename U>
__global__ void LayerNormBackwardWhenBatchSizeIsOne(
const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean,
const U *var, const U *scale, float epsilon, int feature_size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < feature_size) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon));
if (d_x != nullptr) {
if (d_scale == nullptr) {
d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val);
} else {
d_x[idx] =
static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val);
}
}
if (d_scale != nullptr) {
d_scale[idx] = static_cast<U>(d_y[idx]) *
(static_cast<U>(x[idx]) - mean[idx]) / var_val;
}
if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]);
}
}
template <typename T, typename U>
static void LayerNormBackward(const T *x, const T *d_y, const U *scale,
const U *mean, const U *var, T *d_x, U *d_scale,
U *d_bias, float epsilon, int batch_size,
int feature_size,
const framework::ExecutionContext &ctx) {
auto &dev_ctx = ctx.cuda_device_context();
auto stream = dev_ctx.stream();
const int kMaxBlockDim = 512;
const int kMaxBlockNum = 128;
int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) |
((d_scale != nullptr ? 1 : 0) << 1) |
((d_bias != nullptr ? 1 : 0));
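  // gradient_flag is a 3-bit mask: bit 2 = d_x, bit 1 = d_scale, bit 0 = d_bias,
  // so e.g. case 5 (0b101) below handles d_x and d_bias without d_scale.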
if (gradient_flag == 0) return;
if (batch_size == 1) {
LayerNormBackwardWhenBatchSizeIsOne<
T, U><<<(feature_size + kMaxBlockDim - 1) / kMaxBlockDim, kMaxBlockDim,
0, stream>>>(x, d_y, d_x, d_scale, d_bias, mean, var, scale,
epsilon, feature_size);
if (d_x != nullptr) {
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<1, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
}
return;
}
auto block_dim = GetDesiredBlockDim(batch_size);
switch (gradient_flag) {
    case 1:  // d_x == nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
true><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
    case 3:  // d_x == nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientAll<
T, U, kBlockDim, false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardGradientOnlyDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_y, d_x, mean, var, scale, epsilon, feature_size));
}
break;
    case 5:  // d_x != nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
true><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr
{
constexpr int VPT = 4;
constexpr int BDIMX2 = 32;
constexpr int BDIMY2 = 4;
dim3 threads2(BDIMX2, BDIMY2, 1);
constexpr int part_size = BDIMY2 * VPT;
const dim3 blocks2((feature_size + BDIMX2 - 1) / BDIMX2, part_size, 1);
auto part_grad_gamma_ptr =
memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U));
auto part_grad_beta_ptr =
memory::Alloc(dev_ctx, part_size * feature_size * sizeof(U));
U *part_grad_gamma = reinterpret_cast<U *>(part_grad_gamma_ptr->ptr());
U *part_grad_beta = reinterpret_cast<U *>(part_grad_beta_ptr->ptr());
LayerNormBackwardPartGradGammaBeta<T, U, BDIMX2, BDIMY2,
VPT><<<blocks2, threads2, 0, stream>>>(
d_y, x, batch_size, feature_size, mean, var, epsilon, part_grad_gamma,
part_grad_beta); // compute part_grad_gamma, beta
constexpr int BDIMX3 = 32;
constexpr int BDIMY3 = 8;
dim3 threads3(BDIMX3, BDIMY3, 1);
const dim3 blocks3((feature_size + BDIMX2 - 1) / BDIMX2, 1, 1);
LayerNormBackwardSumGradGammaBeta<
T, U, BDIMX3, BDIMY3><<<blocks3, threads3, 0, stream>>>(
part_grad_gamma, part_grad_beta, part_size, batch_size, feature_size,
d_scale, d_bias);
constexpr int BDIMX1 = 32;
constexpr int BDIMY1 = 4;
dim3 threads1(BDIMX1, BDIMY1, 1);
LayerNormBackwardComputeGradInput<
T, U, BDIMX1, BDIMY1><<<batch_size, threads1, 0, stream>>>(
d_y, x, batch_size, feature_size, mean, var, epsilon, scale, d_x);
break;
}
default:
break;
}
}
template <typename T>
void LayerNormDirectCUDAFunctor<T>::operator()(cudaStream_t stream,
const T *input,
std::vector<int> input_shape,
const T *bias, const T *scale,
T *output, T *mean, T *variance,
int begin_norm_axis, float eps) {
const auto x_dims = framework::make_ddim(input_shape);
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormForward<T, T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
template <typename T>
class LayerNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
auto *scale = ctx.Input<Tensor>("Scale");
auto *bias = ctx.Input<Tensor>("Bias");
auto *x = ctx.Input<Tensor>("X");
auto *y = ctx.Output<Tensor>("Y");
auto *mean = ctx.Output<Tensor>("Mean");
auto *var = ctx.Output<Tensor>("Variance");
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
const auto x_dims = x->dims();
auto *x_data = x->data<T>();
auto *y_data = y->mutable_data<T>(ctx.GetPlace());
auto *mean_data = mean->mutable_data<U>(ctx.GetPlace());
auto *var_data = var->mutable_data<U>(ctx.GetPlace());
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *bias_data = (bias == nullptr ? nullptr : bias->data<U>());
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormForward<T, U,
kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x_data, scale_data, bias_data, y_data, mean_data, var_data,
epsilon, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end must be larger than 1"));
break;
}
}
};
template <typename T>
class LayerNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
// d_x, d_scale, d_bias may be nullptr
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *x = ctx.Input<Tensor>("X");
auto *mean = ctx.Input<Tensor>("Mean");
auto *var = ctx.Input<Tensor>("Variance");
auto *scale = ctx.Input<Tensor>("Scale");
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *x_data = x->data<T>();
auto *d_y_data = d_y->data<T>();
auto *mean_data = mean->data<U>();
auto *var_data = var->data<U>();
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *d_scale_data =
(d_scale == nullptr ? nullptr
: d_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_bias_data =
(d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace()));
auto *d_x_data =
(d_x == nullptr ? nullptr : d_x->mutable_data<T>(ctx.GetPlace()));
const auto &x_dims = x->dims();
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data,
d_x_data, d_scale_data, d_bias_data, epsilon,
batch_size, feature_size, ctx);
}
};
template class LayerNormDirectCUDAFunctor<float>;
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE
#undef FIXED_BLOCK_DIM_CASE_BASE
#undef FIXED_BLOCK_DIM_CASE
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
layer_norm,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
layer_norm_grad,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext,
plat::float16>);
|
7da045c99f051dbf0bf32614995ea462e5197e18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel
//Michael Garland
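// ELL stores the matrix as a dense num_rows x num_cols_per_row block in
// column-major order, so the n-th stored entry of a row sits at
// dval[ num_rows * n + row ], with its column index at the same offset in dcolind.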
__global__ void
zgeelltmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
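// Identical to zgeelltmv_kernel except that a lambda-scaled entry of dx is
// subtracted per row: rows below blocksize read dx[ offset + row ] directly,
// while the remaining rows gather their entry through the addrows index list.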
__global__ void
zgeelltmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
                column indices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgeelltmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
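// Example call (illustrative only; assumes an m x n matrix already stored in
// ELL format on the device with nnz_per_row entries per row, plus device
// vectors dx and dy of matching length -- the variable names are placeholders):
//
//     magmaDoubleComplex one  = MAGMA_Z_MAKE( 1.0, 0.0 );
//     magmaDoubleComplex zero = MAGMA_Z_MAKE( 0.0, 0.0 );
//     magma_zgeelltmv( MagmaNoTrans, m, n, nnz_per_row,
//                      one, dval, dcolind, dx, zero, dy, queue );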
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
lambda magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
offset magma_int_t
                used when the shifted diagonal is not the main diagonal
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
                in case the matrix powers kernel is used
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgeelltmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
magmaDoubleComplex tmp_shift;
//magma_zsetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
hipLaunchKernelGGL(( zgeelltmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
| 7da045c99f051dbf0bf32614995ea462e5197e18.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel
//Michael Garland
__global__ void
zgeelltmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
__global__ void
zgeelltmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDoubleComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = dcolind [ num_rows * n + row ];
magmaDoubleComplex val = dval [ num_rows * n + row ];
if( val != 0)
dot += val * dx[col ];
}
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
                column indices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
zgeelltmv_kernel<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
lambda magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
offset magma_int_t
                used when the shifted diagonal is not the main diagonal
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
                in case the matrix powers kernel is used
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgeelltmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
magma_int_t threads = BLOCK_SIZE;
magmaDoubleComplex tmp_shift;
//magma_zsetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
zgeelltmv_kernel_shift<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, tmp_shift, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
|
8a2a7946fc9c86f745d6da1d3b24d06fa5f264c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuNDArray_reductions.h"
#include "setup_grid.h"
#include <thrust/extrema.h>
namespace Gadgetron {
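// find_stride: for a reduction over dimension "dim", "stride" becomes the product
// of all dimensions preceding dim and "dims" collects the remaining (output) dimensions.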
template<class T> static void
find_stride( cuNDArray<T> *in, size_t dim, size_t *stride, std::vector<size_t> *dims )
{
*stride = 1;
for( unsigned int i=0; i<in->get_number_of_dimensions(); i++ ){
if( i != dim )
dims->push_back(in->get_size(i));
if( i < dim )
*stride *= in->get_size(i);
}
}
// Sum
//
template<class T>
__global__ void sum_kernel( T *in, T *out,
unsigned int stride, unsigned int number_of_batches, unsigned int number_of_elements )
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
if( idx < number_of_elements ){
unsigned int in_idx = (idx/stride)*stride*number_of_batches+(idx%stride);
T val = in[in_idx];
for( unsigned int i=1; i<number_of_batches; i++ )
val += in[i*stride+in_idx];
out[idx] = val;
}
}
// Sum
//
template<class T> boost::shared_ptr< cuNDArray<T> > sum( cuNDArray<T> *in, unsigned int dim )
{
// Some validity checks
if( !(in->get_number_of_dimensions()>1) ){
throw std::runtime_error("sum: underdimensioned.");;
}
if( dim > in->get_number_of_dimensions()-1 ){
throw std::runtime_error( "sum: dimension out of range.");;
}
unsigned int number_of_batches = in->get_size(dim);
unsigned int number_of_elements = in->get_number_of_elements()/number_of_batches;
// Setup block/grid dimensions
dim3 blockDim; dim3 gridDim;
setup_grid( number_of_elements, &blockDim, &gridDim );
// Find element stride
size_t stride; std::vector<size_t> dims;
find_stride<T>( in, dim, &stride, &dims );
// Invoke kernel
boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>());
out->create(&dims);
hipLaunchKernelGGL(( sum_kernel<T>), dim3(gridDim), dim3(blockDim) , 0, 0, in->get_data_ptr(), out->get_data_ptr(), stride, number_of_batches, number_of_elements );
CHECK_FOR_CUDA_ERROR();
return out;
}
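// Example (illustrative only; assumes a 2D cuNDArray<float> "img" of size
// {nx, ny} already resident on the device):
//
//     boost::shared_ptr< cuNDArray<float> > summed = sum( &img, 1 ); // collapses dim 1, result size {nx}
//     float avg = mean( &img );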
template<class T> T mean(cuNDArray<T>* in)
{
return thrust::reduce(in->begin(),in->end(),T(0),thrust::plus<T>())/T(in->get_number_of_elements());
}
template<class T> T min(cuNDArray<T>* in)
{
return *thrust::min_element(in->begin(),in->end());
}
template<class T> T max(cuNDArray<T>* in)
{
return *thrust::max_element(in->begin(),in->end());
}
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > sum<float>( cuNDArray<float>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > sum<double>( cuNDArray<double>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > sum<float_complext>( cuNDArray<float_complext>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > sum<double_complext>( cuNDArray<double_complext>*, unsigned int);
template EXPORTGPUCORE float mean<float>(cuNDArray<float>*);
template EXPORTGPUCORE float_complext mean<float_complext>(cuNDArray<float_complext>*);
template EXPORTGPUCORE double mean<double>(cuNDArray<double>*);
template EXPORTGPUCORE double_complext mean<double_complext>(cuNDArray<double_complext>*);
template EXPORTGPUCORE float min<float>(cuNDArray<float>*);
template EXPORTGPUCORE float max<float>(cuNDArray<float>*);
template EXPORTGPUCORE double min<double>(cuNDArray<double>*);
template EXPORTGPUCORE double max<double>(cuNDArray<double>*);
}
| 8a2a7946fc9c86f745d6da1d3b24d06fa5f264c2.cu | #include "cuNDArray_reductions.h"
#include "setup_grid.h"
#include <thrust/extrema.h>
namespace Gadgetron {
template<class T> static void
find_stride( cuNDArray<T> *in, size_t dim, size_t *stride, std::vector<size_t> *dims )
{
*stride = 1;
for( unsigned int i=0; i<in->get_number_of_dimensions(); i++ ){
if( i != dim )
dims->push_back(in->get_size(i));
if( i < dim )
*stride *= in->get_size(i);
}
}
// Sum
//
template<class T>
__global__ void sum_kernel( T *in, T *out,
unsigned int stride, unsigned int number_of_batches, unsigned int number_of_elements )
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
if( idx < number_of_elements ){
unsigned int in_idx = (idx/stride)*stride*number_of_batches+(idx%stride);
T val = in[in_idx];
for( unsigned int i=1; i<number_of_batches; i++ )
val += in[i*stride+in_idx];
out[idx] = val;
}
}
// Sum
//
template<class T> boost::shared_ptr< cuNDArray<T> > sum( cuNDArray<T> *in, unsigned int dim )
{
// Some validity checks
if( !(in->get_number_of_dimensions()>1) ){
throw std::runtime_error("sum: underdimensioned.");;
}
if( dim > in->get_number_of_dimensions()-1 ){
throw std::runtime_error( "sum: dimension out of range.");;
}
unsigned int number_of_batches = in->get_size(dim);
unsigned int number_of_elements = in->get_number_of_elements()/number_of_batches;
// Setup block/grid dimensions
dim3 blockDim; dim3 gridDim;
setup_grid( number_of_elements, &blockDim, &gridDim );
// Find element stride
size_t stride; std::vector<size_t> dims;
find_stride<T>( in, dim, &stride, &dims );
// Invoke kernel
boost::shared_ptr< cuNDArray<T> > out(new cuNDArray<T>());
out->create(&dims);
sum_kernel<T><<< gridDim, blockDim >>>( in->get_data_ptr(), out->get_data_ptr(), stride, number_of_batches, number_of_elements );
CHECK_FOR_CUDA_ERROR();
return out;
}
template<class T> T mean(cuNDArray<T>* in)
{
return thrust::reduce(in->begin(),in->end(),T(0),thrust::plus<T>())/T(in->get_number_of_elements());
}
template<class T> T min(cuNDArray<T>* in)
{
return *thrust::min_element(in->begin(),in->end());
}
template<class T> T max(cuNDArray<T>* in)
{
return *thrust::max_element(in->begin(),in->end());
}
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > sum<float>( cuNDArray<float>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > sum<double>( cuNDArray<double>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float_complext> > sum<float_complext>( cuNDArray<float_complext>*, unsigned int);
template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double_complext> > sum<double_complext>( cuNDArray<double_complext>*, unsigned int);
template EXPORTGPUCORE float mean<float>(cuNDArray<float>*);
template EXPORTGPUCORE float_complext mean<float_complext>(cuNDArray<float_complext>*);
template EXPORTGPUCORE double mean<double>(cuNDArray<double>*);
template EXPORTGPUCORE double_complext mean<double_complext>(cuNDArray<double_complext>*);
template EXPORTGPUCORE float min<float>(cuNDArray<float>*);
template EXPORTGPUCORE float max<float>(cuNDArray<float>*);
template EXPORTGPUCORE double min<double>(cuNDArray<double>*);
template EXPORTGPUCORE double max<double>(cuNDArray<double>*);
}
|
b17761eb7527af6f29288b9a3bfa971cb5abdc73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Image.cuh"
__device__ __host__ jax::Image::Camera::Camera(){
this->cam_rot = {0.0f,0.0f,0.0f};
this->cam_pos = {0.0f,0.0f,0.0f};
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
this->size = {0,0};
}
__device__ __host__ jax::Image::Camera::Camera(uint2 size){
this->cam_rot = {0.0f,0.0f,0.0f};
this->cam_pos = {0.0f,0.0f,0.0f};
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
  this->size = size;
}
__device__ __host__ jax::Image::Camera::Camera(uint2 size, float3 cam_pos, float3 cam_rot){
this->cam_pos = cam_pos;
this->cam_rot = cam_rot;
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
this->size = size;
}
jax::Image::Image(){
this->id = -1;
this->filePath = "n/a";
}
jax::Image::Image(uint2 size, unsigned int colorDepth, Unity<unsigned char>* pixels){
this->filePath = "n/a";
this->id = -1;
this->colorDepth = colorDepth;
this->pixels = pixels;
this->camera.size = size;
this->size = size;
}
jax::Image::Image(std::string filePath, int id) {
std::string filename = getFileFromFilePath(filePath);
this->filePath = filePath;
this->id = id;
this->colorDepth = 1;
unsigned char* pixels_host = nullptr;
// find the image extension
std::string extension = getFileExtension(filePath);
if(extension == "png"){ // load if PNG
pixels_host = readPNG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "tiff" || extension == "tif"){ // load if TIFF
pixels_host = readTIFF(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "jpeg" || extension == "jpg"){ // load if JPG
pixels_host = readJPEG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
// set some initial params
this->camera.size = this->size;
this->size = size;
this->pixels = new Unity<unsigned char>(pixels_host,this->size.y*this->size.x*this->colorDepth,cpu);
// read additional params, and if the param requirement is removed then don't do any of this
// checks that the image is not a seed image. extra params are not needed for seed images
if (id != -1){
std::string params_path = getFolderFromFilePath(filePath);
// defaults to reading ascii params if both exist
    if (fileExists(params_path + "/params.csv")){// read in the file as an ASCII encoding
std::cout << "Reading ASCII encoded camera parameters ..." << std::endl;
std::cout << "Looking for matching file " << filename << std::endl;
      // you know, this could be cleaner and generalized but idk if we will ever want a csv reader other than here
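      // Each row of params.csv is expected to hold, in order: filename, cam_pos.x,
      // cam_pos.y, cam_pos.z, cam_rot.x, cam_rot.y, cam_rot.z, fov.x, fov.y, foc,
      // dpix.x, dpix.y, timeStamp, size.x, size.y. For example (values illustrative only):
      //   img_0001.png,0.0,0.0,-10.0,0.0,0.0,0.0,0.17,0.17,0.16,0.0,0.0,1580766000,1024,1024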
std::ifstream file(params_path + "/params.csv"); // declare file stream: http://www.cplusplus.com/reference/iostream/ifstream/
std::string value;
while (file.good()){
// wait until we find the filename, or maybe we don't and it was empty. in that case nothing happens
getline(file,value,','); // read a string until next comma: http://www.cplusplus.com/reference/string/getline/
// sanitize the input
value.erase(std::remove(value.begin(), value.end(), '\n'), value.end());
if (filename == value){ // if we have a match, read in the parameters one by one
getline(file,value,',');
this->camera.cam_pos.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_pos.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_pos.z = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.z = std::atof(value.c_str());
getline(file,value,',');
this->camera.fov.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.fov.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.foc = std::atof(value.c_str());
getline(file,value,',');
// this->camera.dpix.x = std::atof(value.c_str());
// uses pinhole camera assumption
this->camera.dpix.x = (this->camera.foc * tanf(this->camera.fov.x / 2.0f)) / (this->camera.size.x / 2.0f );
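          // focal length times tan(fov/2) gives half the physical sensor width;
          // dividing by half the pixel count yields the physical size of one pixel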
getline(file,value,',');
// this->camera.dpix.y = std::atof(value.c_str());
// uses pinhole camera assumption
this->camera.dpix.y = this->camera.dpix.x;
getline(file,value,',');
this->camera.timeStamp = std::strtoll(value.c_str(), NULL, 0);
getline(file,value,',');
// camera.size.x was already set
getline(file,value,',');
// camera.side.y was already set
file.close();
break;
}
}
file.close();
} else if (fileExists(params_path + "/params.bcp")) {
std::cout << "Reading BCP encoded camera parameters ..." << std::endl;
      // TODO read in binary encoded guys here
} else { // if no config file was found!
std::cerr << "NO CAMERA PARAM FILE FOUND, at least an empty params.csv or params.bcp is required. To disable this requirement use the flag -np or -noparams" << std::endl;
// std::throw -1; // TODO make this throw an exception
}
}
std::cout << "filePath: " << filePath << std::endl;
}
jax::Image::Image(std::string filePath, unsigned int convertColorDepthTo, int id){
this->filePath = filePath;
this->id = id;
this->colorDepth = 1;
unsigned char* pixels_host = nullptr;
std::string extension = getFileExtension(filePath);
if(extension == "png"){
pixels_host = readPNG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "tiff" || extension == "tif"){
pixels_host = readTIFF(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "jpeg" || extension == "jpg"){
pixels_host = readJPEG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
this->camera.size = this->size;
this->size = size;
this->pixels = new Unity<unsigned char>(pixels_host,this->size.y*this->size.x*this->colorDepth,cpu);
for(int i = 0; i < this->pixels->size(); ++i){
std::cout<<this->pixels->host[i]<<std::endl;
}
if(convertColorDepthTo == 1){
convertToBW(this->pixels, this->colorDepth);
this->colorDepth = 1;
}
else if(convertColorDepthTo != 0){
std::cerr<<"ERROR: Image() does not currently support conversion to anything but BW"<<std::endl;
exit(-1);
}
}
jax::Image::~Image(){
if(this->pixels != nullptr){
delete this->pixels;
}
}
void jax::Image::convertColorDepthTo(unsigned int colorDepth){
std::cout<<"Converting pixel depth to "<<colorDepth<<" from "<<this->colorDepth<<std::endl;
if(colorDepth == 1){
convertToBW(this->pixels,this->colorDepth);
this->colorDepth = 1;
}
else if (colorDepth == 3){
convertToRGB(this->pixels,this->colorDepth);
this->colorDepth = 3;
}
else{
std::cerr<<colorDepth<<" is currently not supported in convertColorDepthTo"<<std::endl;
exit(-1);
}
}
jax::Unity<int2>* jax::Image::getPixelGradients(){
return generatePixelGradients(this->size,this->pixels);
}
void jax::Image::alterSize(int scalingFactor){
if(scalingFactor == 0){
std::cerr<<"using a binDepth of 0 results in no binning or upsampling\nuse binDepth>0 for binning and binDepth<0 for upsampling"<<std::endl;
return;
}
else if((float)this->size.x/powf(2.0f,scalingFactor) < 1.0f ||(float)this->size.y/powf(2.0f,scalingFactor) < 1.0f){
std::cerr<<"ERROR binning "<<scalingFactor<<" many times cannot be done on and image of size "<<this->size.x<<"x"<<this->size.y<<std::endl;
exit(-1);
}
MemoryState origin = this->pixels->getMemoryState();
if(origin != gpu) this->pixels->setMemoryState(gpu);
uint2 scaler = {2,2};
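  // each pass below halves (binning) or doubles (upsampling) both image
  // dimensions, so |scalingFactor| passes scale the image by 2^|scalingFactor|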
if(scalingFactor < 0){//upsampling
for(int i = 0; i < abs(scalingFactor); ++i){
this->pixels->setData(upsample(this->size,this->pixels)->device,this->size.x*this->size.y*this->colorDepth*4,gpu);
this->size = this->size*scaler;
}
}
else{//downsampling
for(int i = 0; i < scalingFactor; ++i){
this->pixels->setData(bin(this->size,this->pixels)->device,(this->size.x*this->size.y*this->colorDepth)/4,gpu);
this->size = this->size/scaler;
}
}
if(origin != gpu) this->pixels->setMemoryState(origin);
}
jax::Unity<unsigned char>* jax::addBufferBorder(uint2 size, jax::Unity<unsigned char>* pixels, int2 border){
if(border.x == 0 && border.y == 0){
std::cerr<<"ERROR border cannot be 0"<<std::endl;
exit(-1);
}
if(border.x*2 + (int) size.x < 0 || border.y*2 + (int)size.y < 0){
std::cerr<<"ERROR border causes negative dimensions"<<std::endl;
exit(-1);
}
if(pixels->size()%((int)size.x*size.y) != 0){
std::cerr<<"ERROR color depth cannot be determined due to pixels->size()%(size.x*size.y) != 0"<<std::endl;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
uint2 newSize = {size.x + (border.x*2),size.y + (border.y*2)};
int colorDepth = pixels->size()/((int)size.x*size.y);
Unity<unsigned char>* bufferedPixels = new Unity<unsigned char>(nullptr,newSize.x*newSize.y*colorDepth,gpu);
  // copy each source row into its border-offset destination row
  for(int y = 0; y < (int)size.y; ++y){
    CudaSafeCall(hipMemcpy(bufferedPixels->device + ((y+border.y)*newSize.x) + border.x,pixels->device + (y*size.x),size.x*sizeof(unsigned char),hipMemcpyDeviceToDevice));
}
if(origin != gpu){
bufferedPixels->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return bufferedPixels;
}
jax::Unity<float>* jax::addBufferBorder(uint2 size, jax::Unity<float>* pixels, int2 border){
if(border.x == 0 && border.y == 0){
std::cerr<<"ERROR border cannot be 0"<<std::endl;
exit(-1);
}
if(border.x*2 + (int) size.x < 0 || border.y*2 + (int)size.y < 0){
std::cerr<<"ERROR border causes negative dimensions"<<std::endl;
exit(-1);
}
if(pixels->size()%((int)size.x*size.y) != 0){
std::cerr<<"ERROR color depth cannot be determined due to pixels->size()%(size.x*size.y) != 0"<<std::endl;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
uint2 newSize = {size.x + (border.x*2),size.y + (border.y*2)};
int colorDepth = pixels->size()/((int)size.x*size.y);
Unity<float>* bufferedPixels = new Unity<float>(nullptr,newSize.x*newSize.y*colorDepth,gpu);
for(int y = 0; y < (int)size.y; ++y){
CudaSafeCall(hipMemcpy(bufferedPixels->device + ((y+border.y)*newSize.x) + border.x,pixels->device + (y*size.x),size.x*sizeof(float),hipMemcpyDeviceToDevice));
}
if(origin != gpu){
bufferedPixels->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return bufferedPixels;
}
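// convertImageToChar normalizes a float image to [0,1] and casts it to 8-bit [0,255]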
jax::Unity<unsigned char>* jax::convertImageToChar(Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
  if(origin != gpu) pixels->setMemoryState(gpu);
normalizeImage(pixels);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(pixels->size(),grid,block,convertToCharImage);
Unity<unsigned char>* castPixels = new Unity<unsigned char>(nullptr,pixels->size(),gpu);
hipLaunchKernelGGL(( convertToCharImage), dim3(grid),dim3(block), 0, 0, pixels->size(),castPixels->device,pixels->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
castPixels->setMemoryState(origin);
}
return castPixels;
}
jax::Unity<float>* jax::convertImageToFlt(Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(pixels->size(),grid,block,convertToFltImage);
Unity<float>* castPixels = new Unity<float>(nullptr,pixels->size(),gpu);
hipLaunchKernelGGL(( convertToFltImage), dim3(grid),dim3(block), 0, 0, pixels->size(),pixels->device,castPixels->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
castPixels->setMemoryState(origin);
}
return castPixels;
}
//todo use cuda reduction instead of cpu loop for min max finding
//todo add support for color depth
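// normalizeImage finds the min/max pixel value on the host and rescales every pixel to [0,1] on the GPU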
void jax::normalizeImage(Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
float2 minMax = {FLT_MAX,-FLT_MAX};
if(pixels->getFore() != both) pixels->setMemoryState(both);
for(int i = 0; i < pixels->size(); ++i){
if(minMax.x > pixels->host[i]) minMax.x = pixels->host[i];
if(minMax.y < pixels->host[i]) minMax.y = pixels->host[i];
}
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(unsigned long, float*, float2) = &normalize;
getFlatGridBlock(pixels->size(),grid,block,fp);
hipLaunchKernelGGL(( normalize), dim3(grid),dim3(block), 0, 0, pixels->size(),pixels->device,minMax);
hipDeviceSynchronize();
CudaCheckError();
pixels->setFore(gpu);
if(origin != both) pixels->setMemoryState(origin);
}
void jax::normalizeImage(Unity<float>* pixels, float2 minMax){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(unsigned long, float*, float2) = &normalize;
getFlatGridBlock(pixels->size(),grid,block,fp);
hipLaunchKernelGGL(( normalize), dim3(grid),dim3(block), 0, 0, pixels->size(),pixels->device,minMax);
pixels->setFore(gpu);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu) pixels->setMemoryState(origin);
}
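// convertToBW collapses 2 (gray+alpha), 3 (rgb), or 4 (rgba) channel images down to a single grayscale channel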
void jax::convertToBW(Unity<unsigned char>* pixels, unsigned int colorDepth){
if(colorDepth == 1){
std::cout<<"Pixels are already bw"<<std::endl;
return;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned int numPixels = (pixels->size()/colorDepth);
unsigned char* bwPixels_device;
CudaSafeCall(hipMalloc((void**)&bwPixels_device, numPixels*sizeof(unsigned char)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(numPixels, grid, block,generateBW);
hipLaunchKernelGGL(( generateBW), dim3(grid),dim3(block), 0, 0, numPixels, colorDepth, pixels->device, bwPixels_device);
hipDeviceSynchronize();
CudaCheckError();
pixels->setData(bwPixels_device, numPixels, gpu);
if(origin != gpu) pixels->setMemoryState(origin);
}
void jax::convertToRGB(Unity<unsigned char>* pixels, unsigned int colorDepth){
if(colorDepth == 3){
std::cout<<"Pixels are already rgb"<<std::endl;
return;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned int numPixels = (pixels->size()/colorDepth);
unsigned char* rgbPixels_device;
CudaSafeCall(hipMalloc((void**)&rgbPixels_device, numPixels*3*sizeof(unsigned char)));
dim3 grid;
dim3 block;
getFlatGridBlock(numPixels, grid, block,generateRGB);
hipLaunchKernelGGL(( generateRGB), dim3(grid),dim3(block), 0, 0, numPixels, colorDepth, pixels->device, rgbPixels_device);
hipDeviceSynchronize();
CudaCheckError();
pixels->setData(rgbPixels_device, 3*numPixels, gpu);
if(origin != gpu) pixels->setMemoryState(origin);
}
//TODO implement
void calcFundamentalMatrix_2View(float cam0[3][3], float cam1[3][3], float (&F)[3][3]){
}
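// calcFundamentalMatrix_2View builds rotation matrices from the two camera orientations,
// forms the essential matrix E = R*S from the baseline skew matrix S, and returns F = K^-T * E * K^-1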
void jax::calcFundamentalMatrix_2View(Image* query, Image* target, float3 (&F)[3]){
if(query->camera.foc != target->camera.foc){
std::cout<<"ERROR calculating fundamental matrix for 2view needs to bet taken with same camera (foc&fov are same)"<<std::endl;
exit(-1);
}
float angle1;
if(abs(query->camera.cam_rot.z) < .00001) {
if(query->camera.cam_rot.y > 0) angle1 = PI/2;
else angle1 = -1*PI/2;
}
else {
angle1 = atan(query->camera.cam_rot.y / query->camera.cam_rot.z);
if(query->camera.cam_rot.z<0 && query->camera.cam_rot.y>=0) {
angle1 += PI;
}
if(query->camera.cam_rot.z<0 && query->camera.cam_rot.y<0) {
angle1 -= PI;
}
}
float3 A1[3] = {
{1, 0, 0},
{0, cos(angle1), -sin(angle1)},
{0, sin(angle1), cos(angle1)}
};
float3 temp = {0.0f,0.0f,0.0f};
multiply(A1, query->camera.cam_rot,temp);
  float angle2 = 0.0f;
  if(abs(temp.z) < .00001) {
    if(temp.x <= 0) angle2 = PI/2;
    else angle2 = -1*PI/2;
  }
  else {
    angle2 = atan(-1*temp.x / temp.z);
    if(temp.z<0 && temp.x<0) {
      angle2 += PI;
    }
    if(temp.z<0 && temp.x>0) {
      angle2 -= PI;
    }
  }
float3 B1[3] = {
{cos(angle2), 0, sin(angle2)},
{0, 1, 0},
{-sin(angle2), 0, cos(angle2)}
};
float3 temp2 = {0.0f,0.0f,0.0f};
multiply(B1, temp, temp2);
float3 rot1[3];
multiply(B1, A1, rot1);
float3 rot1Transpose[3];
transpose(rot1,rot1Transpose);
multiply(rot1Transpose, temp2, temp);
angle1 = 0.0f;
if(abs(target->camera.cam_rot.z) < .00001) {
if(target->camera.cam_rot.y > 0) angle1 = PI/2;
else angle1 = -1*PI/2;
}
else {
angle1 = atan(target->camera.cam_rot.y / target->camera.cam_rot.z);
if(target->camera.cam_rot.z<0 && target->camera.cam_rot.y>=0) {
angle1 += PI;
}
if(target->camera.cam_rot.z<0 && target->camera.cam_rot.y<0) {
angle1 -= PI;
}
}
float3 A2[3] = {
{1, 0, 0},
{0, cos(angle1), -sin(angle1)},
{0, sin(angle1), cos(angle1)}
};
multiply(A2, target->camera.cam_rot,temp2);
  angle2 = 0.0f;
  if(abs(temp2.z) < .00001) {
    if(temp2.x <= 0) angle2 = PI/2;
    else angle2 = -1*PI/2;
  }
  else {
    angle2 = atan(-1*temp2.x / temp2.z);
    if(temp2.z<0 && temp2.x<0) {
      angle2 += PI;
    }
    if(temp2.z<0 && temp2.x>0) {
      angle2 -= PI;
    }
  }
float3 B2[3] = {
{cos(angle2), 0, sin(angle2)},
{0, 1, 0},
{-sin(angle2), 0, cos(angle2)}
};
multiply(B2, temp2, temp);
float3 rot2[3];
multiply(B2, A2, rot2);
float3 rot2Transpose[3];
transpose(rot2, rot2Transpose);
multiply(rot2Transpose, temp, temp2);
float3 K[3] = {
{query->camera.foc, 0, ((float)query->size.x)/2.0f},//NOTE the foc was divided by dpix.x and dpix.y but currently using foc in pixels
{0, query->camera.foc, ((float)query->size.y)/2.0f},//NOTE the foc was divided by dpix.x and dpix.y but currently using foc in pixels
{0, 0, 1}
};
float3 K_inv[3];
inverse(K,K_inv);
float3 K_invTranspose[3];
transpose(K_inv,K_invTranspose);
float3 R[3];
multiply(rot2Transpose, rot1, R);
float3 S[3] = {
{0, query->camera.cam_pos.z - target->camera.cam_pos.z, target->camera.cam_pos.y - query->camera.cam_pos.y},
{query->camera.cam_pos.z - target->camera.cam_pos.z,0, query->camera.cam_pos.x - target->camera.cam_pos.x},
{query->camera.cam_pos.y - target->camera.cam_pos.y, target->camera.cam_pos.x - query->camera.cam_pos.x, 0}
};
float3 E[3];
multiply(R,S,E);
float3 tempF[3];
multiply(K_invTranspose, E,tempF);
multiply(tempF, K_inv, F);
std::cout << std::endl <<"between image "<<query->id<<" and "<<target->id
<<" the final fundamental matrix result is: " << std::endl;
for(int r = 0; r < 3; ++r) {
std::cout << F[r].x << " " << F[r].y << " "<< F[r].z << std::endl;
}
std::cout<<std::endl;
}
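// get_cam_params2view parses a plain-text parameter file with lines keyed by
// foc, fov (currently ignored), res, cam1C, cam1V, cam2C, cam2V and fills in both cameras,
// deriving dpix from foc, fov, and image size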
void jax::get_cam_params2view(Image* cam1, Image* cam2, std::string infile){
std::ifstream input(infile);
std::string line;
float res = 0.0f;
while(std::getline(input, line)) {
std::istringstream iss(line);
std::string param;
float arg1;
float arg2;
float arg3;
iss >> param >> arg1;
if(param.compare("foc") == 0) {
cam1->camera.foc = arg1;
cam2->camera.foc = arg1;
}
else if(param.compare("fov") == 0) {
//cam1->camera.fov = arg1;
//cam2->camera.fov = arg1;
}
else if(param.compare("res") == 0) {
res = arg1;
}
else if(param.compare("cam1C") == 0) {
iss >> arg2 >> arg3;
cam1->camera.cam_pos.x = arg1;
cam1->camera.cam_pos.y = arg2;
cam1->camera.cam_pos.z = arg3;
}
else if(param.compare("cam1V") == 0) {
iss >> arg2 >> arg3;
cam1->camera.cam_rot.x = arg1;
cam1->camera.cam_rot.y = arg2;
cam1->camera.cam_rot.z = arg3;
}
else if(param.compare("cam2C") == 0) {
iss >> arg2 >> arg3;
cam2->camera.cam_pos.x = arg1;
cam2->camera.cam_pos.y = arg2;
cam2->camera.cam_pos.z = arg3;
}
else if(param.compare("cam2V") == 0) {
iss >> arg2 >> arg3;
cam2->camera.cam_rot.x = arg1;
cam2->camera.cam_rot.y = arg2;
cam2->camera.cam_rot.z = arg3;
}
}
cam1->camera.dpix = {cam1->camera.foc*tan(cam1->camera.fov.x/2)/(cam1->size.x/2),
cam1->camera.foc*tan(cam1->camera.fov.y/2)/(cam1->size.y/2)};
cam2->camera.dpix = {cam2->camera.foc*tan(cam2->camera.fov.x/2)/(cam2->size.x/2),
cam2->camera.foc*tan(cam2->camera.fov.y/2)/(cam2->size.y/2)};
}
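// generatePixelGradients computes per-pixel central-difference gradients (one-sided at the image borders)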
jax::Unity<int2>* jax::generatePixelGradients(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int2* gradients_device = nullptr;
CudaSafeCall(hipMalloc((void**)&gradients_device,pixels->size()*sizeof(int2)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2,unsigned char*,int2*) = &calculatePixelGradients;
getFlatGridBlock(pixels->size(),grid,block,fp);
hipLaunchKernelGGL(( calculatePixelGradients), dim3(grid),dim3(block), 0, 0, imageSize,pixels->device,gradients_device);
CudaCheckError();
if(origin != gpu) pixels->setMemoryState(origin);
return new Unity<int2>(gradients_device,pixels->size(),gpu);
}
jax::Unity<float2>* jax::generatePixelGradients(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
float2* gradients_device = nullptr;
CudaSafeCall(hipMalloc((void**)&gradients_device,pixels->size()*sizeof(float2)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2,float*,float2*) = &calculatePixelGradients;
getFlatGridBlock(pixels->size(),grid,block,fp);
hipLaunchKernelGGL(( calculatePixelGradients), dim3(grid),dim3(block), 0, 0, imageSize,pixels->device,gradients_device);
CudaCheckError();
  if(origin != gpu) pixels->setMemoryState(origin);
return new Unity<float2>(gradients_device,pixels->size(),gpu);
}
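// makeBinnable pads (and, for odd dimensions, temporarily upsamples) an image so that both
// dimensions become divisible by 2^plannedDepth, allowing repeated binning without remainder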
void jax::makeBinnable(uint2 &size, Unity<unsigned char>* pixels, int plannedDepth){
MemoryState origin = pixels->getMemoryState();
int numResize = (int)pow(2, plannedDepth);
int dimOffset[2] = {(int)size.x%numResize,(int)size.y%numResize};
if(dimOffset[0] || dimOffset[1]){
if(origin != gpu) pixels->setMemoryState(gpu);
bool mustSizeUp = size.x%2 || size.y%2;
if(mustSizeUp){
pixels->setData(upsample(size,pixels)->device,pixels->size()*4,gpu);
size = size*2;numResize *= 2;
dimOffset[0] = size.x%numResize;
dimOffset[1] = size.y%numResize;
}
int2 border = {
dimOffset[0] ? (numResize-((int)size.x%numResize))/2 : 0,
dimOffset[1] ? (numResize-((int)size.y%numResize))/2 : 0
};
uint2 newSize = {border.x*2 + size.x, border.y*2 + size.y};
pixels->setData(addBufferBorder(size,pixels,border)->device,newSize.x*newSize.y,gpu);
size = newSize;
    if(mustSizeUp){
      pixels->setData(bin(size,pixels)->device,pixels->size()/4,gpu);
      size = size/2;
    }
if(origin != gpu) pixels->setMemoryState(origin);
}
else{
std::cout<<"no resize necessary for binning to depth "<<plannedDepth<<std::endl;//TODO turn to verbose debug
}
}
void jax::makeBinnable(uint2 &size, Unity<float>* pixels, int plannedDepth){
MemoryState origin = pixels->getMemoryState();
int numResize = (int)pow(2, plannedDepth);
int dimOffset[2] = {(int)size.x%numResize,(int)size.y%numResize};
if(dimOffset[0] || dimOffset[1]){
if(origin != gpu) pixels->setMemoryState(gpu);
bool mustSizeUp = size.x%2 || size.y%2;
if(mustSizeUp){
pixels->setData(upsample(size,pixels)->device,pixels->size()*4,gpu);
size = size*2;numResize *= 2;
dimOffset[0] = size.x%numResize;
dimOffset[1] = size.y%numResize;
}
int2 border = {
dimOffset[0] ? (numResize-((int)size.x%numResize))/2 : 0,
dimOffset[1] ? (numResize-((int)size.y%numResize))/2 : 0
};
uint2 newSize = {border.x*2 + size.x, border.y*2 + size.y};
pixels->setData(addBufferBorder(size,pixels,border)->device,newSize.x*newSize.y,gpu);
size = newSize;
if(mustSizeUp){
pixels->setData(bin(size,pixels)->device,pixels->size()/4,gpu);
size = size/2;
}
if(origin != gpu) pixels->setMemoryState(origin);
}
else{
std::cout<<"no resize necessary for binning to depth "<<plannedDepth<<std::endl;//TODO turn to verbose debug
}
}
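// bin downsamples an image by a factor of 2 in each dimension, averaging each 2x2 pixel block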
jax::Unity<unsigned char>* jax::bin(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
Unity<unsigned char>* binnedImage = new Unity<unsigned char>(nullptr, pixels->size()/4, gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*) = &binImage;
get2DGridBlock(imageSize/2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
hipLaunchKernelGGL(( binImage), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,binnedImage->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
binnedImage->setMemoryState(origin);
}
return binnedImage;
}
jax::Unity<float>* jax::bin(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
Unity<float>* binnedImage = new Unity<float>(nullptr, pixels->size()/4, gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*) = &binImage;
get2DGridBlock(imageSize/2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
hipLaunchKernelGGL(( binImage), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,binnedImage->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
binnedImage->setMemoryState(origin);
}
return binnedImage;
}
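// upsample doubles the image dimensions using bilinear interpolation with mirrored border handling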
jax::Unity<unsigned char>* jax::upsample(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*) = &upsampleImage;
get2DGridBlock(imageSize*2,grid,block,fp);
Unity<unsigned char>* upsampledImage = new Unity<unsigned char>(nullptr, pixels->size()*4, gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
hipLaunchKernelGGL(( upsampleImage), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,upsampledImage->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
upsampledImage->setMemoryState(origin);
}
return upsampledImage;
}
jax::Unity<float>* jax::upsample(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*) = &upsampleImage;
get2DGridBlock(imageSize*2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* upsampledImage = new Unity<float>(nullptr, pixels->size()*4, gpu);
hipLaunchKernelGGL(( upsampleImage), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,upsampledImage->device);
hipDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
upsampledImage->setMemoryState(origin);
}
return upsampledImage;
}
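// scaleImage resamples an image with bilinear interpolation; outputPixelWidth is the output pixel size
// relative to the input (values < 1 enlarge the image, values > 1 shrink it)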
jax::Unity<unsigned char>* jax::scaleImage(uint2 imageSize, Unity<unsigned char>* pixels, float outputPixelWidth){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned char* sampledImage_device = nullptr;
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*, float) = &bilinearInterpolation;
get2DGridBlock((imageSize/outputPixelWidth) + 1, grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
CudaSafeCall(hipMalloc((void**)&sampledImage_device,pixels->size()*4*sizeof(unsigned char)));
hipLaunchKernelGGL(( bilinearInterpolation), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,sampledImage_device,outputPixelWidth);
hipDeviceSynchronize();
CudaCheckError();
Unity<unsigned char>* sampledImage = new Unity<unsigned char>(sampledImage_device, pixels->size()/(outputPixelWidth*outputPixelWidth), gpu);
if(origin != gpu){
pixels->setMemoryState(origin);
sampledImage->setMemoryState(origin);
}
return sampledImage;
}
jax::Unity<float>* jax::scaleImage(uint2 imageSize, Unity<float>* pixels, float outputPixelWidth){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
float* sampledImage_device = nullptr;
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*, float) = &bilinearInterpolation;
get2DGridBlock((imageSize/outputPixelWidth) + 1, grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
CudaSafeCall(hipMalloc((void**)&sampledImage_device,pixels->size()*4*sizeof(float)));
hipLaunchKernelGGL(( bilinearInterpolation), dim3(grid),dim3(block), 0, 0, imageSize,colorDepth,pixels->device,sampledImage_device,outputPixelWidth);
hipDeviceSynchronize();
CudaCheckError();
Unity<float>* sampledImage = new Unity<float>(sampledImage_device, pixels->size()/(outputPixelWidth*outputPixelWidth), gpu);
if(origin != gpu){
pixels->setMemoryState(origin);
sampledImage->setMemoryState(origin);
}
return sampledImage;
}
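// convolve applies an odd-sized kernel to the image; when symmetric is true, out-of-bounds reads are
// mirrored back into the image, otherwise border pixels the kernel cannot cover are set to 0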
jax::Unity<float>* jax::convolve(uint2 imageSize, Unity<unsigned char>* pixels, int2 kernelSize, float* kernel, bool symmetric){
if(kernelSize.x%2 == 0 || kernelSize.y%2 == 0){
std::cerr<<"ERROR kernel for image convolution must have an odd dimension"<<std::endl;
exit(-1);
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* convolvedImage = new Unity<float>(nullptr,pixels->size(),gpu);
float* kernel_device = nullptr;
CudaSafeCall(hipMalloc((void**)&kernel_device,kernelSize.x*kernelSize.y*sizeof(float)));
CudaSafeCall(hipMemcpy(kernel_device,kernel,kernelSize.x*kernelSize.y*sizeof(float),hipMemcpyHostToDevice));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
if(symmetric){
void (*fp)(uint2, unsigned char*, unsigned int, int2, float*, float*) = &convolveImage_symmetric;
get2DGridBlock(imageSize,grid,block,fp);
hipLaunchKernelGGL(( convolveImage_symmetric), dim3(grid),dim3(block), 0, 0, imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
else{
void (*fp)(uint2, unsigned char*, unsigned int, int2, float*, float*) = &convolveImage;
get2DGridBlock(imageSize,grid,block,fp);
hipLaunchKernelGGL(( convolveImage), dim3(grid),dim3(block), 0, 0, imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
hipDeviceSynchronize();
CudaCheckError();
CudaSafeCall(hipFree(kernel_device));
if(origin != gpu){
convolvedImage->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return convolvedImage;
}
jax::Unity<float>* jax::convolve(uint2 imageSize, Unity<float>* pixels, int2 kernelSize, float* kernel, bool symmetric){
if(kernelSize.x%2 == 0 || kernelSize.y%2 == 0){
std::cerr<<"ERROR kernel for image convolution must have an odd dimension"<<std::endl;
exit(-1);
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* convolvedImage = new Unity<float>(nullptr,pixels->size(),gpu);
float* kernel_device = nullptr;
CudaSafeCall(hipMalloc((void**)&kernel_device,kernelSize.x*kernelSize.y*sizeof(float)));
CudaSafeCall(hipMemcpy(kernel_device,kernel,kernelSize.x*kernelSize.y*sizeof(float),hipMemcpyHostToDevice));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
if(symmetric){
void (*fp)(uint2, float*, unsigned int, int2, float*, float*) = &convolveImage_symmetric;
get2DGridBlock(imageSize,grid,block,fp);
hipLaunchKernelGGL(( convolveImage_symmetric), dim3(grid),dim3(block), 0, 0, imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
else{
void (*fp)(uint2, float*, unsigned int, int2, float*, float*) = &convolveImage;
get2DGridBlock(imageSize,grid,block,fp);
hipLaunchKernelGGL(( convolveImage), dim3(grid),dim3(block), 0, 0, imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
hipDeviceSynchronize();
CudaCheckError();
CudaSafeCall(hipFree(kernel_device));
if(origin != gpu){
convolvedImage->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return convolvedImage;
}
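// getSymmetrizedCoord reflects an out-of-range coordinate back into [0, l) (mirror boundary handling)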
__device__ __host__ __forceinline__ int jax::getSymmetrizedCoord(int i, unsigned int l){
int ll = 2*l;
i = (i+ll)%ll;
return (i>l-1) ? i = ll - 1 - i : i;
}
__device__ __host__ __forceinline__ unsigned char jax::bwaToBW(const uchar2 &color){
return (1-color.y)*color.x + color.y*color.x;
}
__device__ __host__ __forceinline__ unsigned char jax::rgbToBW(const uchar3 &color){
return (color.x/4) + (color.y/2) + (color.z/4);
}
__device__ __host__ __forceinline__ unsigned char jax::rgbaToBW(const uchar4 &color){
return rgbToBW(rgbaToRGB(color));
}
__device__ __host__ __forceinline__ uchar3 jax::bwToRGB(const unsigned char &color){
int colorTemp = (int) color*10;
return {(unsigned char)(colorTemp/4),(unsigned char)(colorTemp/2),(unsigned char)(colorTemp/4)};
}
__device__ __host__ __forceinline__ uchar3 jax::bwaToRGB(const uchar2 &color){
return {color.x,color.y,(unsigned char)((color.x/3)*2 + (color.y/3))};
}
__device__ __host__ __forceinline__ uchar3 jax::rgbaToRGB(const uchar4 &color){
return {
(unsigned char)((1-color.w)*color.x + color.w*color.x),
(unsigned char)((1-color.w)*color.y + color.w*color.y),
(unsigned char)((1-color.w)*color.z + color.w*color.z),
};
}
__global__ void jax::generateBW(int numPixels, unsigned int colorDepth, unsigned char* colorPixels, unsigned char* pixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
int numValues = (int) colorDepth;
switch(numValues){
case 2:
pixels[globalID] = bwaToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1]});
break;
case 3:
pixels[globalID] = rgbToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2]});
break;
case 4:
pixels[globalID] = rgbaToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2], colorPixels[globalID*numValues + 3]});
break;
default:
printf("ERROR colorDepth of %u is not supported\n",numValues);
asm("trap;");
}
}
}
__global__ void jax::generateRGB(int numPixels, unsigned int colorDepth, unsigned char* colorPixels, unsigned char* pixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
int numValues = colorDepth;
uchar3 value;
switch(numValues){
case 1:
value = bwToRGB(colorPixels[globalID]);
break;
case 2:
value = bwaToRGB({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1]});
break;
case 4:
value = rgbaToRGB({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2], colorPixels[globalID*numValues + 3]});
break;
default:
printf("ERROR colorDepth of %u is not supported\n",numValues);
asm("trap;");
}
pixels[globalID*3] = value.x;
pixels[globalID*3 + 1] = value.y;
pixels[globalID*3 + 2] = value.z;
}
}
__global__ void jax::binImage(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* binnedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x < imageSize.x/2 && y < imageSize.y/2){
for(int d = 0; d < colorDepth; ++d){
float sumPix = pixels[y*colorDepth*2*imageSize.x + (x*2*colorDepth) + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + (x*2*colorDepth) + d] +
pixels[y*2*colorDepth*imageSize.x + ((x*2+1)*colorDepth) + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + ((x*2+1)*colorDepth) + d];
binnedImage[y*colorDepth*(imageSize.x/2) + (x*colorDepth) + d] = (unsigned char) roundf(sumPix/4.0f);
}
}
}
__global__ void jax::upsampleImage(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* upsampledImage){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x*2 && j < imageSize.y*2){
float x = i*0.5f;
float y = j*0.5f;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
upsampledImage[j*colorDepth*(imageSize.x*2) + i*colorDepth + d] = (unsigned char) sumPix;
}
}
}
__global__ void jax::bilinearInterpolation(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* outputPixels, float outputPixelWidth){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x/outputPixelWidth && j < imageSize.y/outputPixelWidth){
float x = i*outputPixelWidth;
float y = j*outputPixelWidth;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
outputPixels[j*colorDepth*llroundf(imageSize.x/outputPixelWidth) + i*colorDepth + d] = (unsigned char) sumPix;
}
}
}
__global__ void jax::binImage(uint2 imageSize, unsigned int colorDepth, float* pixels, float* binnedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x < imageSize.x/2 && y < imageSize.y/2){
for(int d = 0; d < colorDepth; ++d){
float sumPix = pixels[y*colorDepth*2*imageSize.x + x*2*colorDepth + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + x*2*colorDepth + d] +
pixels[y*2*colorDepth*imageSize.x + (x*2+1)*colorDepth + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + (x*2+1)*colorDepth + d];
binnedImage[y*colorDepth*(imageSize.x/2) + x*colorDepth + d] = sumPix/4.0f;
}
}
}
__global__ void jax::upsampleImage(uint2 imageSize, unsigned int colorDepth, float* pixels, float* upsampledImage){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x*2 && j < imageSize.y*2){
float x = i*0.5f;
float y = j*0.5f;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
upsampledImage[j*colorDepth*(imageSize.x*2) + i*colorDepth + d] = sumPix;
}
}
}
__global__ void jax::bilinearInterpolation(uint2 imageSize, unsigned int colorDepth, float* pixels, float* outputPixels, float outputPixelWidth){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x/outputPixelWidth && j < imageSize.y/outputPixelWidth){
float x = i*outputPixelWidth;
float y = j*outputPixelWidth;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
outputPixels[j*colorDepth*llroundf(imageSize.x/outputPixelWidth) + i*colorDepth + d] = sumPix;
}
}
}
__global__ void jax::convolveImage(uint2 imageSize, unsigned char* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
if(x + (kernelSize.x/2) >= imageSize.x || x < kernelSize.x/2 || y + (kernelSize.y/2) >= imageSize.y || y < kernelSize.y/2){
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = 0;
}
else{
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
sum += ((float)pixels[((y+ky)*imageSize.x + (x+kx))*colorDepth + color])*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
}
__global__ void jax::convolveImage(uint2 imageSize, float* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
if(x + (kernelSize.x/2) >= imageSize.x || x < kernelSize.x/2 || y + (kernelSize.y/2) >= imageSize.y || y < kernelSize.y/2){
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = 0;
}
else{
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
sum += pixels[((y+ky)*imageSize.x + (x+kx))*colorDepth + color]*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
}
__global__ void jax::convolveImage_symmetric(uint2 imageSize, unsigned char* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
int2 symmetricCoord = {0,0};
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
symmetricCoord = {getSymmetrizedCoord(x+kx,(int)imageSize.x),getSymmetrizedCoord(y+ky,(int)imageSize.y)};
sum += ((float)pixels[((symmetricCoord.y)*imageSize.x + (symmetricCoord.x))*colorDepth + color])*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
__global__ void jax::convolveImage_symmetric(uint2 imageSize, float* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
int2 symmetricCoord = {0,0};
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
symmetricCoord = {getSymmetrizedCoord(x+kx,(int)imageSize.x),getSymmetrizedCoord(y+ky,(int)imageSize.y)};
sum += pixels[((symmetricCoord.y)*imageSize.x + (symmetricCoord.x))*colorDepth + color]*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
__global__ void jax::convertToCharImage(unsigned int numPixels, unsigned char* pixels, float* fltPixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
    pixels[globalID] = (unsigned char) (255.0f*fltPixels[globalID]);
}
}
__global__ void jax::convertToFltImage(unsigned int numPixels, unsigned char* pixels, float* fltPixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
fltPixels[globalID] = (float) pixels[globalID];
}
}
__global__ void jax::normalize(unsigned long numPixels, float* pixels, float2 minMax){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
pixels[globalID] = (pixels[globalID] - minMax.x)/(minMax.y - minMax.x);
}
}
__global__ void jax::calculatePixelGradients(uint2 imageSize, unsigned char* pixels, int2* gradients){
unsigned long globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < imageSize.x*imageSize.y){
int2 loc = {(int)(globalID%imageSize.x),(int)(globalID/imageSize.x)};
int2 xContrib = {loc.x + 1,loc.x - 1};
int2 yContrib = {loc.y + 1,loc.y - 1};
if(xContrib.y == -1) xContrib = xContrib + 1;
else if(xContrib.x == imageSize.x) xContrib = xContrib - 1;
if(yContrib.y == -1) yContrib = yContrib + 1;
else if(yContrib.x == imageSize.y) yContrib = yContrib - 1;
gradients[globalID] = {
(int)pixels[loc.y*imageSize.x + xContrib.x] - (int)pixels[loc.y*imageSize.x + xContrib.y],
(int)pixels[yContrib.x*imageSize.x + loc.x] - (int)pixels[yContrib.y*imageSize.x + loc.x]
};
}
}
__global__ void jax::calculatePixelGradients(uint2 imageSize, float* pixels, float2* gradients){
unsigned long globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < imageSize.x*imageSize.y){
int2 loc = {(int)(globalID%imageSize.x),(int)(globalID/imageSize.x)};
int2 xContrib = {loc.x + 1,loc.x - 1};
int2 yContrib = {loc.y + 1,loc.y - 1};
if(xContrib.y == -1) xContrib = xContrib + 1;
else if(xContrib.x == imageSize.x) xContrib = xContrib - 1;
if(yContrib.y == -1) yContrib = yContrib + 1;
else if(yContrib.x == imageSize.y) yContrib = yContrib - 1;
gradients[globalID] = {
pixels[loc.y*imageSize.x + xContrib.x] - pixels[loc.y*imageSize.x + xContrib.y],
pixels[yContrib.x*imageSize.x + loc.x] - pixels[yContrib.y*imageSize.x + loc.x]
};
}
}
| b17761eb7527af6f29288b9a3bfa971cb5abdc73.cu |
#include "Image.cuh"
__device__ __host__ jax::Image::Camera::Camera(){
this->cam_rot = {0.0f,0.0f,0.0f};
this->cam_pos = {0.0f,0.0f,0.0f};
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
this->size = {0,0};
}
__device__ __host__ jax::Image::Camera::Camera(uint2 size){
this->cam_rot = {0.0f,0.0f,0.0f};
this->cam_pos = {0.0f,0.0f,0.0f};
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
  this->size = size;
}
__device__ __host__ jax::Image::Camera::Camera(uint2 size, float3 cam_pos, float3 cam_rot){
this->cam_pos = cam_pos;
this->cam_rot = cam_rot;
this->fov = {0.0f,0.0f};
this->foc = 0;
this->dpix = {0.0f,0.0f};
this->size = size;
}
jax::Image::Image(){
this->id = -1;
this->filePath = "n/a";
}
jax::Image::Image(uint2 size, unsigned int colorDepth, Unity<unsigned char>* pixels){
this->filePath = "n/a";
this->id = -1;
this->colorDepth = colorDepth;
this->pixels = pixels;
this->camera.size = size;
this->size = size;
}
jax::Image::Image(std::string filePath, int id) {
std::string filename = getFileFromFilePath(filePath);
this->filePath = filePath;
this->id = id;
this->colorDepth = 1;
unsigned char* pixels_host = nullptr;
// find the image extension
std::string extension = getFileExtension(filePath);
if(extension == "png"){ // load if PNG
pixels_host = readPNG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "tiff" || extension == "tif"){ // load if TIFF
pixels_host = readTIFF(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "jpeg" || extension == "jpg"){ // load if JPG
pixels_host = readJPEG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
// set some initial params
this->camera.size = this->size;
this->size = size;
this->pixels = new Unity<unsigned char>(pixels_host,this->size.y*this->size.x*this->colorDepth,cpu);
// read additional params, and if the param requirement is removed then don't do any of this
// checks that the image is not a seed image. extra params are not needed for seed images
if (id != -1){
std::string params_path = getFolderFromFilePath(filePath);
// defaults to reading ascii params if both exist
    if (fileExists(params_path + "/params.csv")){// read in the file as an ASCII encoding
std::cout << "Reading ASCII encoded camera parameters ..." << std::endl;
std::cout << "Looking for matching file " << filename << std::endl;
      // you know, this could be cleaner and generalized but idk if we will ever want a csv reader other than here
std::ifstream file(params_path + "/params.csv"); // declare file stream: http://www.cplusplus.com/reference/iostream/ifstream/
std::string value;
while (file.good()){
// wait until we find the filename, or maybe we don't and it was empty. in that case nothing happens
getline(file,value,','); // read a string until next comma: http://www.cplusplus.com/reference/string/getline/
// sanitize the input
value.erase(std::remove(value.begin(), value.end(), '\n'), value.end());
if (filename == value){ // if we have a match, read in the parameters one by one
getline(file,value,',');
this->camera.cam_pos.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_pos.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_pos.z = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.cam_rot.z = std::atof(value.c_str());
getline(file,value,',');
this->camera.fov.x = std::atof(value.c_str());
getline(file,value,',');
this->camera.fov.y = std::atof(value.c_str());
getline(file,value,',');
this->camera.foc = std::atof(value.c_str());
getline(file,value,',');
// this->camera.dpix.x = std::atof(value.c_str());
// uses pinhole camera assumption
this->camera.dpix.x = (this->camera.foc * tanf(this->camera.fov.x / 2.0f)) / (this->camera.size.x / 2.0f );
getline(file,value,',');
// this->camera.dpix.y = std::atof(value.c_str());
// uses pinhole camera assumption
this->camera.dpix.y = this->camera.dpix.x;
getline(file,value,',');
this->camera.timeStamp = std::strtoll(value.c_str(), NULL, 0);
getline(file,value,',');
// camera.size.x was already set
getline(file,value,',');
          // camera.size.y was already set
file.close();
break;
}
}
file.close();
} else if (fileExists(params_path + "/params.bcp")) {
std::cout << "Reading BCP encoded camera parameters ..." << std::endl;
// TODO read in binary incoded guys here
} else { // if no config file was found!
std::cerr << "NO CAMERA PARAM FILE FOUND, at least an empty params.csv or params.bcp is required. To disable this requirement use the flag -np or -noparams" << std::endl;
// std::throw -1; // TODO make this throw an exception
}
}
std::cout << "filePath: " << filePath << std::endl;
}
jax::Image::Image(std::string filePath, unsigned int convertColorDepthTo, int id){
this->filePath = filePath;
this->id = id;
this->colorDepth = 1;
unsigned char* pixels_host = nullptr;
std::string extension = getFileExtension(filePath);
if(extension == "png"){
pixels_host = readPNG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "tiff" || extension == "tif"){
pixels_host = readTIFF(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
else if(extension == "jpeg" || extension == "jpg"){
pixels_host = readJPEG(filePath.c_str(), this->size.y, this->size.x, this->colorDepth);
}
this->camera.size = this->size;
this->size = size;
this->pixels = new Unity<unsigned char>(pixels_host,this->size.y*this->size.x*this->colorDepth,cpu);
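  // note: the loop below prints every raw pixel value to stdout (extremely verbose; looks like leftover debug output)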
for(int i = 0; i < this->pixels->size(); ++i){
std::cout<<this->pixels->host[i]<<std::endl;
}
if(convertColorDepthTo == 1){
convertToBW(this->pixels, this->colorDepth);
this->colorDepth = 1;
}
else if(convertColorDepthTo != 0){
std::cerr<<"ERROR: Image() does not currently support conversion to anything but BW"<<std::endl;
exit(-1);
}
}
jax::Image::~Image(){
if(this->pixels != nullptr){
delete this->pixels;
}
}
void jax::Image::convertColorDepthTo(unsigned int colorDepth){
std::cout<<"Converting pixel depth to "<<colorDepth<<" from "<<this->colorDepth<<std::endl;
if(colorDepth == 1){
convertToBW(this->pixels,this->colorDepth);
this->colorDepth = 1;
}
else if (colorDepth == 3){
convertToRGB(this->pixels,this->colorDepth);
this->colorDepth = 3;
}
else{
std::cerr<<colorDepth<<" is currently not supported in convertColorDepthTo"<<std::endl;
exit(-1);
}
}
jax::Unity<int2>* jax::Image::getPixelGradients(){
return generatePixelGradients(this->size,this->pixels);
}
void jax::Image::alterSize(int scalingFactor){
if(scalingFactor == 0){
std::cerr<<"using a binDepth of 0 results in no binning or upsampling\nuse binDepth>0 for binning and binDepth<0 for upsampling"<<std::endl;
return;
}
else if((float)this->size.x/powf(2.0f,scalingFactor) < 1.0f ||(float)this->size.y/powf(2.0f,scalingFactor) < 1.0f){
std::cerr<<"ERROR binning "<<scalingFactor<<" many times cannot be done on and image of size "<<this->size.x<<"x"<<this->size.y<<std::endl;
exit(-1);
}
MemoryState origin = this->pixels->getMemoryState();
if(origin != gpu) this->pixels->setMemoryState(gpu);
uint2 scaler = {2,2};
if(scalingFactor < 0){//upsampling
for(int i = 0; i < abs(scalingFactor); ++i){
this->pixels->setData(upsample(this->size,this->pixels)->device,this->size.x*this->size.y*this->colorDepth*4,gpu);
this->size = this->size*scaler;
}
}
else{//downsampling
for(int i = 0; i < scalingFactor; ++i){
this->pixels->setData(bin(this->size,this->pixels)->device,(this->size.x*this->size.y*this->colorDepth)/4,gpu);
this->size = this->size/scaler;
}
}
if(origin != gpu) this->pixels->setMemoryState(origin);
}
jax::Unity<unsigned char>* jax::addBufferBorder(uint2 size, jax::Unity<unsigned char>* pixels, int2 border){
if(border.x == 0 && border.y == 0){
std::cerr<<"ERROR border cannot be 0"<<std::endl;
exit(-1);
}
if(border.x*2 + (int) size.x < 0 || border.y*2 + (int)size.y < 0){
std::cerr<<"ERROR border causes negative dimensions"<<std::endl;
exit(-1);
}
if(pixels->size()%((int)size.x*size.y) != 0){
std::cerr<<"ERROR color depth cannot be determined due to pixels->size()%(size.x*size.y) != 0"<<std::endl;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
uint2 newSize = {size.x + (border.x*2),size.y + (border.y*2)};
int colorDepth = pixels->size()/((int)size.x*size.y);
Unity<unsigned char>* bufferedPixels = new Unity<unsigned char>(nullptr,newSize.x*newSize.y*colorDepth,gpu);
  for(int y = 0; y < (int)size.y; ++y){
    CudaSafeCall(cudaMemcpy(bufferedPixels->device + ((y+border.y)*newSize.x) + border.x,pixels->device + (y*size.x),size.x*sizeof(unsigned char),cudaMemcpyDeviceToDevice));
}
if(origin != gpu){
bufferedPixels->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return bufferedPixels;
}
jax::Unity<float>* jax::addBufferBorder(uint2 size, jax::Unity<float>* pixels, int2 border){
if(border.x == 0 && border.y == 0){
std::cerr<<"ERROR border cannot be 0"<<std::endl;
exit(-1);
}
if(border.x*2 + (int) size.x < 0 || border.y*2 + (int)size.y < 0){
std::cerr<<"ERROR border causes negative dimensions"<<std::endl;
exit(-1);
}
if(pixels->size()%((int)size.x*size.y) != 0){
std::cerr<<"ERROR color depth cannot be determined due to pixels->size()%(size.x*size.y) != 0"<<std::endl;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
uint2 newSize = {size.x + (border.x*2),size.y + (border.y*2)};
int colorDepth = pixels->size()/((int)size.x*size.y);
Unity<float>* bufferedPixels = new Unity<float>(nullptr,newSize.x*newSize.y*colorDepth,gpu);
for(int y = 0; y < (int)size.y; ++y){
CudaSafeCall(cudaMemcpy(bufferedPixels->device + ((y+border.y)*newSize.x) + border.x,pixels->device + (y*size.x),size.x*sizeof(float),cudaMemcpyDeviceToDevice));
}
if(origin != gpu){
bufferedPixels->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return bufferedPixels;
}
jax::Unity<unsigned char>* jax::convertImageToChar(Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
  if(origin != gpu) pixels->setMemoryState(gpu);
normalizeImage(pixels);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(pixels->size(),grid,block,convertToCharImage);
Unity<unsigned char>* castPixels = new Unity<unsigned char>(nullptr,pixels->size(),gpu);
convertToCharImage<<<grid,block>>>(pixels->size(),castPixels->device,pixels->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
castPixels->setMemoryState(origin);
}
return castPixels;
}
jax::Unity<float>* jax::convertImageToFlt(Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(pixels->size(),grid,block,convertToFltImage);
Unity<float>* castPixels = new Unity<float>(nullptr,pixels->size(),gpu);
convertToFltImage<<<grid,block>>>(pixels->size(),pixels->device,castPixels->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
castPixels->setMemoryState(origin);
}
return castPixels;
}
//todo use cuda reduction instead of cpu loop for min max finding
//todo add support for color depth
void jax::normalizeImage(Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
float2 minMax = {FLT_MAX,-FLT_MAX};
if(pixels->getFore() != both) pixels->setMemoryState(both);
for(int i = 0; i < pixels->size(); ++i){
if(minMax.x > pixels->host[i]) minMax.x = pixels->host[i];
if(minMax.y < pixels->host[i]) minMax.y = pixels->host[i];
}
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(unsigned long, float*, float2) = &normalize;
getFlatGridBlock(pixels->size(),grid,block,fp);
normalize<<<grid,block>>>(pixels->size(),pixels->device,minMax);
cudaDeviceSynchronize();
CudaCheckError();
pixels->setFore(gpu);
if(origin != both) pixels->setMemoryState(origin);
}
void jax::normalizeImage(Unity<float>* pixels, float2 minMax){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(unsigned long, float*, float2) = &normalize;
getFlatGridBlock(pixels->size(),grid,block,fp);
normalize<<<grid,block>>>(pixels->size(),pixels->device,minMax);
pixels->setFore(gpu);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu) pixels->setMemoryState(origin);
}
void jax::convertToBW(Unity<unsigned char>* pixels, unsigned int colorDepth){
if(colorDepth == 1){
std::cout<<"Pixels are already bw"<<std::endl;
return;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned int numPixels = (pixels->size()/colorDepth);
unsigned char* bwPixels_device;
CudaSafeCall(cudaMalloc((void**)&bwPixels_device, numPixels*sizeof(unsigned char)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
getFlatGridBlock(numPixels, grid, block,generateBW);
generateBW<<<grid,block>>>(numPixels, colorDepth, pixels->device, bwPixels_device);
cudaDeviceSynchronize();
CudaCheckError();
pixels->setData(bwPixels_device, numPixels, gpu);
if(origin != gpu) pixels->setMemoryState(origin);
}
void jax::convertToRGB(Unity<unsigned char>* pixels, unsigned int colorDepth){
if(colorDepth == 3){
std::cout<<"Pixels are already rgb"<<std::endl;
return;
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned int numPixels = (pixels->size()/colorDepth);
unsigned char* rgbPixels_device;
CudaSafeCall(cudaMalloc((void**)&rgbPixels_device, numPixels*3*sizeof(unsigned char)));
dim3 grid;
dim3 block;
getFlatGridBlock(numPixels, grid, block,generateRGB);
generateRGB<<<grid,block>>>(numPixels, colorDepth, pixels->device, rgbPixels_device);
cudaDeviceSynchronize();
CudaCheckError();
pixels->setData(rgbPixels_device, 3*numPixels, gpu);
if(origin != gpu) pixels->setMemoryState(origin);
}
//TODO implement
void calcFundamentalMatrix_2View(float cam0[3][3], float cam1[3][3], float (&F)[3][3]){
}
void jax::calcFundamentalMatrix_2View(Image* query, Image* target, float3 (&F)[3]){
if(query->camera.foc != target->camera.foc){
std::cout<<"ERROR calculating fundamental matrix for 2view needs to bet taken with same camera (foc&fov are same)"<<std::endl;
exit(-1);
}
float angle1;
if(abs(query->camera.cam_rot.z) < .00001) {
if(query->camera.cam_rot.y > 0) angle1 = PI/2;
else angle1 = -1*PI/2;
}
else {
angle1 = atan(query->camera.cam_rot.y / query->camera.cam_rot.z);
if(query->camera.cam_rot.z<0 && query->camera.cam_rot.y>=0) {
angle1 += PI;
}
if(query->camera.cam_rot.z<0 && query->camera.cam_rot.y<0) {
angle1 -= PI;
}
}
float3 A1[3] = {
{1, 0, 0},
{0, cos(angle1), -sin(angle1)},
{0, sin(angle1), cos(angle1)}
};
float3 temp = {0.0f,0.0f,0.0f};
multiply(A1, query->camera.cam_rot,temp);
  float angle2 = 0.0f;
  if(abs(temp.z) < .00001) {
    if(temp.x <= 0) angle2 = PI/2;
    else angle2 = -1*PI/2;
  }
  else {
    angle2 = atan(-1*temp.x / temp.z);
    if(temp.z<0 && temp.x<0) {
      angle2 += PI;
    }
    if(temp.z<0 && temp.x>0) {
      angle2 -= PI;
    }
  }
float3 B1[3] = {
{cos(angle2), 0, sin(angle2)},
{0, 1, 0},
{-sin(angle2), 0, cos(angle2)}
};
float3 temp2 = {0.0f,0.0f,0.0f};
multiply(B1, temp, temp2);
float3 rot1[3];
multiply(B1, A1, rot1);
float3 rot1Transpose[3];
transpose(rot1,rot1Transpose);
multiply(rot1Transpose, temp2, temp);
angle1 = 0.0f;
if(abs(target->camera.cam_rot.z) < .00001) {
if(target->camera.cam_rot.y > 0) angle1 = PI/2;
else angle1 = -1*PI/2;
}
else {
angle1 = atan(target->camera.cam_rot.y / target->camera.cam_rot.z);
if(target->camera.cam_rot.z<0 && target->camera.cam_rot.y>=0) {
angle1 += PI;
}
if(target->camera.cam_rot.z<0 && target->camera.cam_rot.y<0) {
angle1 -= PI;
}
}
float3 A2[3] = {
{1, 0, 0},
{0, cos(angle1), -sin(angle1)},
{0, sin(angle1), cos(angle1)}
};
multiply(A2, target->camera.cam_rot,temp2);
  angle2 = 0.0f;
  if(abs(temp2.z) < .00001) {
    if(temp2.x <= 0) angle2 = PI/2;
    else angle2 = -1*PI/2;
  }
  else {
    angle2 = atan(-1*temp2.x / temp2.z);
    if(temp2.z<0 && temp2.x<0) {
      angle2 += PI;
    }
    if(temp2.z<0 && temp2.x>0) {
      angle2 -= PI;
    }
  }
float3 B2[3] = {
{cos(angle2), 0, sin(angle2)},
{0, 1, 0},
{-sin(angle2), 0, cos(angle2)}
};
multiply(B2, temp2, temp);
float3 rot2[3];
multiply(B2, A2, rot2);
float3 rot2Transpose[3];
transpose(rot2, rot2Transpose);
multiply(rot2Transpose, temp, temp2);
float3 K[3] = {
{query->camera.foc, 0, ((float)query->size.x)/2.0f},//NOTE the foc was divided by dpix.x and dpix.y but currently using foc in pixels
{0, query->camera.foc, ((float)query->size.y)/2.0f},//NOTE the foc was divided by dpix.x and dpix.y but currently using foc in pixels
{0, 0, 1}
};
float3 K_inv[3];
inverse(K,K_inv);
float3 K_invTranspose[3];
transpose(K_inv,K_invTranspose);
float3 R[3];
multiply(rot2Transpose, rot1, R);
float3 S[3] = {
{0, query->camera.cam_pos.z - target->camera.cam_pos.z, target->camera.cam_pos.y - query->camera.cam_pos.y},
{query->camera.cam_pos.z - target->camera.cam_pos.z,0, query->camera.cam_pos.x - target->camera.cam_pos.x},
{query->camera.cam_pos.y - target->camera.cam_pos.y, target->camera.cam_pos.x - query->camera.cam_pos.x, 0}
};
float3 E[3];
multiply(R,S,E);
float3 tempF[3];
multiply(K_invTranspose, E,tempF);
multiply(tempF, K_inv, F);
std::cout << std::endl <<"between image "<<query->id<<" and "<<target->id
<<" the final fundamental matrix result is: " << std::endl;
for(int r = 0; r < 3; ++r) {
std::cout << F[r].x << " " << F[r].y << " "<< F[r].z << std::endl;
}
std::cout<<std::endl;
}
void jax::get_cam_params2view(Image* cam1, Image* cam2, std::string infile){
std::ifstream input(infile);
std::string line;
float res = 0.0f;
while(std::getline(input, line)) {
std::istringstream iss(line);
std::string param;
float arg1;
float arg2;
float arg3;
iss >> param >> arg1;
if(param.compare("foc") == 0) {
cam1->camera.foc = arg1;
cam2->camera.foc = arg1;
}
else if(param.compare("fov") == 0) {
//cam1->camera.fov = arg1;
//cam2->camera.fov = arg1;
}
else if(param.compare("res") == 0) {
res = arg1;
}
else if(param.compare("cam1C") == 0) {
iss >> arg2 >> arg3;
cam1->camera.cam_pos.x = arg1;
cam1->camera.cam_pos.y = arg2;
cam1->camera.cam_pos.z = arg3;
}
else if(param.compare("cam1V") == 0) {
iss >> arg2 >> arg3;
cam1->camera.cam_rot.x = arg1;
cam1->camera.cam_rot.y = arg2;
cam1->camera.cam_rot.z = arg3;
}
else if(param.compare("cam2C") == 0) {
iss >> arg2 >> arg3;
cam2->camera.cam_pos.x = arg1;
cam2->camera.cam_pos.y = arg2;
cam2->camera.cam_pos.z = arg3;
}
else if(param.compare("cam2V") == 0) {
iss >> arg2 >> arg3;
cam2->camera.cam_rot.x = arg1;
cam2->camera.cam_rot.y = arg2;
cam2->camera.cam_rot.z = arg3;
}
}
cam1->camera.dpix = {cam1->camera.foc*tan(cam1->camera.fov.x/2)/(cam1->size.x/2),
cam1->camera.foc*tan(cam1->camera.fov.y/2)/(cam1->size.y/2)};
cam2->camera.dpix = {cam2->camera.foc*tan(cam2->camera.fov.x/2)/(cam2->size.x/2),
cam2->camera.foc*tan(cam2->camera.fov.y/2)/(cam2->size.y/2)};
}
jax::Unity<int2>* jax::generatePixelGradients(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int2* gradients_device = nullptr;
CudaSafeCall(cudaMalloc((void**)&gradients_device,pixels->size()*sizeof(int2)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2,unsigned char*,int2*) = &calculatePixelGradients;
getFlatGridBlock(pixels->size(),grid,block,fp);
calculatePixelGradients<<<grid,block>>>(imageSize,pixels->device,gradients_device);
CudaCheckError();
if(origin != gpu) pixels->setMemoryState(origin);
return new Unity<int2>(gradients_device,pixels->size(),gpu);
}
jax::Unity<float2>* jax::generatePixelGradients(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
float2* gradients_device = nullptr;
CudaSafeCall(cudaMalloc((void**)&gradients_device,pixels->size()*sizeof(float2)));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2,float*,float2*) = &calculatePixelGradients;
getFlatGridBlock(pixels->size(),grid,block,fp);
calculatePixelGradients<<<grid,block>>>(imageSize,pixels->device,gradients_device);
CudaCheckError();
if(origin != gpu) pixels->setMemoryState(origin);
return new Unity<float2>(gradients_device,pixels->size(),gpu);
}
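// makeBinnable pads (and, for odd dimensions, temporarily upsamples) an image so that both
// dimensions become divisible by 2^plannedDepth, allowing the image to be binned (halved)
// plannedDepth times without losing pixel alignment.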
void jax::makeBinnable(uint2 &size, Unity<unsigned char>* pixels, int plannedDepth){
MemoryState origin = pixels->getMemoryState();
int numResize = (int)pow(2, plannedDepth);
int dimOffset[2] = {(int)size.x%numResize,(int)size.y%numResize};
if(dimOffset[0] || dimOffset[1]){
if(origin != gpu) pixels->setMemoryState(gpu);
bool mustSizeUp = size.x%2 || size.y%2;
if(mustSizeUp){
pixels->setData(upsample(size,pixels)->device,pixels->size()*4,gpu);
size = size*2;numResize *= 2;
dimOffset[0] = size.x%numResize;
dimOffset[1] = size.y%numResize;
}
int2 border = {
dimOffset[0] ? (numResize-((int)size.x%numResize))/2 : 0,
dimOffset[1] ? (numResize-((int)size.y%numResize))/2 : 0
};
uint2 newSize = {border.x*2 + size.x, border.y*2 + size.y};
pixels->setData(addBufferBorder(size,pixels,border)->device,newSize.x*newSize.y,gpu);
size = newSize;
if(mustSizeUp){
pixels->setData(bin(size,pixels)->device,pixels->size()/4,gpu);
size = size/2;
}
if(origin != gpu) pixels->setMemoryState(origin);
}
else{
std::cout<<"no resize necessary for binning to depth "<<plannedDepth<<std::endl;//TODO turn to verbose debug
}
}
void jax::makeBinnable(uint2 &size, Unity<float>* pixels, int plannedDepth){
MemoryState origin = pixels->getMemoryState();
int numResize = (int)pow(2, plannedDepth);
int dimOffset[2] = {(int)size.x%numResize,(int)size.y%numResize};
if(dimOffset[0] || dimOffset[1]){
if(origin != gpu) pixels->setMemoryState(gpu);
bool mustSizeUp = size.x%2 || size.y%2;
if(mustSizeUp){
pixels->setData(upsample(size,pixels)->device,pixels->size()*4,gpu);
size = size*2;numResize *= 2;
dimOffset[0] = size.x%numResize;
dimOffset[1] = size.y%numResize;
}
int2 border = {
dimOffset[0] ? (numResize-((int)size.x%numResize))/2 : 0,
dimOffset[1] ? (numResize-((int)size.y%numResize))/2 : 0
};
uint2 newSize = {border.x*2 + size.x, border.y*2 + size.y};
pixels->setData(addBufferBorder(size,pixels,border)->device,newSize.x*newSize.y,gpu);
size = newSize;
if(mustSizeUp){
pixels->setData(bin(size,pixels)->device,pixels->size()/4,gpu);
size = size/2;
}
if(origin != gpu) pixels->setMemoryState(origin);
}
else{
std::cout<<"no resize necessary for binning to depth "<<plannedDepth<<std::endl;//TODO turn to verbose debug
}
}
jax::Unity<unsigned char>* jax::bin(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
Unity<unsigned char>* binnedImage = new Unity<unsigned char>(nullptr, pixels->size()/4, gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*) = &binImage;
get2DGridBlock(imageSize/2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
binImage<<<grid,block>>>(imageSize,colorDepth,pixels->device,binnedImage->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
binnedImage->setMemoryState(origin);
}
return binnedImage;
}
jax::Unity<float>* jax::bin(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
Unity<float>* binnedImage = new Unity<float>(nullptr, pixels->size()/4, gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*) = &binImage;
get2DGridBlock(imageSize/2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
binImage<<<grid,block>>>(imageSize,colorDepth,pixels->device,binnedImage->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
binnedImage->setMemoryState(origin);
}
return binnedImage;
}
jax::Unity<unsigned char>* jax::upsample(uint2 imageSize, Unity<unsigned char>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*) = &upsampleImage;
get2DGridBlock(imageSize*2,grid,block,fp);
Unity<unsigned char>* upsampledImage = new Unity<unsigned char>(nullptr, pixels->size()*4, gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
upsampleImage<<<grid,block>>>(imageSize,colorDepth,pixels->device,upsampledImage->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
upsampledImage->setMemoryState(origin);
}
return upsampledImage;
}
jax::Unity<float>* jax::upsample(uint2 imageSize, Unity<float>* pixels){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*) = &upsampleImage;
get2DGridBlock(imageSize*2,grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* upsampledImage = new Unity<float>(nullptr, pixels->size()*4, gpu);
upsampleImage<<<grid,block>>>(imageSize,colorDepth,pixels->device,upsampledImage->device);
cudaDeviceSynchronize();
CudaCheckError();
if(origin != gpu){
pixels->setMemoryState(origin);
upsampledImage->setMemoryState(origin);
}
return upsampledImage;
}
jax::Unity<unsigned char>* jax::scaleImage(uint2 imageSize, Unity<unsigned char>* pixels, float outputPixelWidth){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
unsigned char* sampledImage_device = nullptr;
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, unsigned char*, unsigned char*, float) = &bilinearInterpolation;
get2DGridBlock((imageSize/outputPixelWidth) + 1, grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
CudaSafeCall(cudaMalloc((void**)&sampledImage_device,pixels->size()*4*sizeof(unsigned char)));
bilinearInterpolation<<<grid,block>>>(imageSize,colorDepth,pixels->device,sampledImage_device,outputPixelWidth);
cudaDeviceSynchronize();
CudaCheckError();
Unity<unsigned char>* sampledImage = new Unity<unsigned char>(sampledImage_device, pixels->size()/(outputPixelWidth*outputPixelWidth), gpu);
if(origin != gpu){
pixels->setMemoryState(origin);
sampledImage->setMemoryState(origin);
}
return sampledImage;
}
jax::Unity<float>* jax::scaleImage(uint2 imageSize, Unity<float>* pixels, float outputPixelWidth){
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
float* sampledImage_device = nullptr;
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
void (*fp)(uint2, unsigned int, float*, float*, float) = &bilinearInterpolation;
get2DGridBlock((imageSize/outputPixelWidth) + 1, grid,block,fp);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
CudaSafeCall(cudaMalloc((void**)&sampledImage_device,pixels->size()*4*sizeof(float)));
bilinearInterpolation<<<grid,block>>>(imageSize,colorDepth,pixels->device,sampledImage_device,outputPixelWidth);
cudaDeviceSynchronize();
CudaCheckError();
Unity<float>* sampledImage = new Unity<float>(sampledImage_device, pixels->size()/(outputPixelWidth*outputPixelWidth), gpu);
if(origin != gpu){
pixels->setMemoryState(origin);
sampledImage->setMemoryState(origin);
}
return sampledImage;
}
jax::Unity<float>* jax::convolve(uint2 imageSize, Unity<unsigned char>* pixels, int2 kernelSize, float* kernel, bool symmetric){
if(kernelSize.x%2 == 0 || kernelSize.y%2 == 0){
std::cerr<<"ERROR kernel for image convolution must have an odd dimension"<<std::endl;
exit(-1);
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* convolvedImage = new Unity<float>(nullptr,pixels->size(),gpu);
float* kernel_device = nullptr;
CudaSafeCall(cudaMalloc((void**)&kernel_device,kernelSize.x*kernelSize.y*sizeof(float)));
CudaSafeCall(cudaMemcpy(kernel_device,kernel,kernelSize.x*kernelSize.y*sizeof(float),cudaMemcpyHostToDevice));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
if(symmetric){
void (*fp)(uint2, unsigned char*, unsigned int, int2, float*, float*) = &convolveImage_symmetric;
get2DGridBlock(imageSize,grid,block,fp);
convolveImage_symmetric<<<grid,block>>>(imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
else{
void (*fp)(uint2, unsigned char*, unsigned int, int2, float*, float*) = &convolveImage;
get2DGridBlock(imageSize,grid,block,fp);
convolveImage<<<grid,block>>>(imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
cudaDeviceSynchronize();
CudaCheckError();
CudaSafeCall(cudaFree(kernel_device));
if(origin != gpu){
convolvedImage->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return convolvedImage;
}
jax::Unity<float>* jax::convolve(uint2 imageSize, Unity<float>* pixels, int2 kernelSize, float* kernel, bool symmetric){
if(kernelSize.x%2 == 0 || kernelSize.y%2 == 0){
std::cerr<<"ERROR kernel for image convolution must have an odd dimension"<<std::endl;
exit(-1);
}
MemoryState origin = pixels->getMemoryState();
if(origin != gpu) pixels->setMemoryState(gpu);
int colorDepth = pixels->size()/((int)imageSize.x*imageSize.y);
Unity<float>* convolvedImage = new Unity<float>(nullptr,pixels->size(),gpu);
float* kernel_device = nullptr;
CudaSafeCall(cudaMalloc((void**)&kernel_device,kernelSize.x*kernelSize.y*sizeof(float)));
CudaSafeCall(cudaMemcpy(kernel_device,kernel,kernelSize.x*kernelSize.y*sizeof(float),cudaMemcpyHostToDevice));
dim3 grid = {1,1,1};
dim3 block = {1,1,1};
if(symmetric){
void (*fp)(uint2, float*, unsigned int, int2, float*, float*) = &convolveImage_symmetric;
get2DGridBlock(imageSize,grid,block,fp);
convolveImage_symmetric<<<grid,block>>>(imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
else{
void (*fp)(uint2, float*, unsigned int, int2, float*, float*) = &convolveImage;
get2DGridBlock(imageSize,grid,block,fp);
convolveImage<<<grid,block>>>(imageSize, pixels->device, colorDepth, kernelSize, kernel_device, convolvedImage->device);
}
cudaDeviceSynchronize();
CudaCheckError();
CudaSafeCall(cudaFree(kernel_device));
if(origin != gpu){
convolvedImage->setMemoryState(origin);
pixels->setMemoryState(origin);
}
return convolvedImage;
}
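// Mirror ("symmetrized") boundary indexing: out-of-range indices are reflected back into
// [0, l-1]. For example, with l = 8: i = -1 -> 0, i = 8 -> 7, i = 9 -> 6.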
__device__ __host__ __forceinline__ int jax::getSymmetrizedCoord(int i, unsigned int l){
int ll = 2*l;
i = (i+ll)%ll;
return (i>l-1) ? i = ll - 1 - i : i;
}
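// Note: (1-a)*x + a*x algebraically reduces to x, so this conversion simply drops the
// alpha channel and returns the gray value unchanged.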
__device__ __host__ __forceinline__ unsigned char jax::bwaToBW(const uchar2 &color){
return (1-color.y)*color.x + color.y*color.x;
}
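// Cheap luminance approximation using weights (1/4, 1/2, 1/4) for (R, G, B) instead of the
// usual perceptual weights (~0.299, 0.587, 0.114); the divisions keep everything in integer
// arithmetic.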
__device__ __host__ __forceinline__ unsigned char jax::rgbToBW(const uchar3 &color){
return (color.x/4) + (color.y/2) + (color.z/4);
}
__device__ __host__ __forceinline__ unsigned char jax::rgbaToBW(const uchar4 &color){
return rgbToBW(rgbaToRGB(color));
}
__device__ __host__ __forceinline__ uchar3 jax::bwToRGB(const unsigned char &color){
int colorTemp = (int) color*10;
return {(unsigned char)(colorTemp/4),(unsigned char)(colorTemp/2),(unsigned char)(colorTemp/4)};
}
__device__ __host__ __forceinline__ uchar3 jax::bwaToRGB(const uchar2 &color){
return {color.x,color.y,(unsigned char)((color.x/3)*2 + (color.y/3))};
}
__device__ __host__ __forceinline__ uchar3 jax::rgbaToRGB(const uchar4 &color){
return {
(unsigned char)((1-color.w)*color.x + color.w*color.x),
(unsigned char)((1-color.w)*color.y + color.w*color.y),
(unsigned char)((1-color.w)*color.z + color.w*color.z),
};
}
__global__ void jax::generateBW(int numPixels, unsigned int colorDepth, unsigned char* colorPixels, unsigned char* pixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
int numValues = (int) colorDepth;
switch(numValues){
case 2:
pixels[globalID] = bwaToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1]});
break;
case 3:
pixels[globalID] = rgbToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2]});
break;
case 4:
pixels[globalID] = rgbaToBW({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2], colorPixels[globalID*numValues + 3]});
break;
default:
printf("ERROR colorDepth of %u is not supported\n",numValues);
asm("trap;");
}
}
}
__global__ void jax::generateRGB(int numPixels, unsigned int colorDepth, unsigned char* colorPixels, unsigned char* pixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
int numValues = colorDepth;
uchar3 value;
switch(numValues){
case 1:
value = bwToRGB(colorPixels[globalID]);
break;
case 2:
value = bwaToRGB({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1]});
break;
case 4:
value = rgbaToRGB({colorPixels[globalID*numValues],colorPixels[globalID*numValues + 1], colorPixels[globalID*numValues + 2], colorPixels[globalID*numValues + 3]});
break;
default:
printf("ERROR colorDepth of %u is not supported\n",numValues);
asm("trap;");
}
pixels[globalID*3] = value.x;
pixels[globalID*3 + 1] = value.y;
pixels[globalID*3 + 2] = value.z;
}
}
__global__ void jax::binImage(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* binnedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x < imageSize.x/2 && y < imageSize.y/2){
for(int d = 0; d < colorDepth; ++d){
float sumPix = pixels[y*colorDepth*2*imageSize.x + (x*2*colorDepth) + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + (x*2*colorDepth) + d] +
pixels[y*2*colorDepth*imageSize.x + ((x*2+1)*colorDepth) + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + ((x*2+1)*colorDepth) + d];
binnedImage[y*colorDepth*(imageSize.x/2) + (x*colorDepth) + d] = (unsigned char) roundf(sumPix/4.0f);
}
}
}
__global__ void jax::upsampleImage(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* upsampledImage){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x*2 && j < imageSize.y*2){
float x = i*0.5f;
float y = j*0.5f;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
upsampledImage[j*colorDepth*(imageSize.x*2) + i*colorDepth + d] = (unsigned char) sumPix;
}
}
}
__global__ void jax::bilinearInterpolation(uint2 imageSize, unsigned int colorDepth, unsigned char* pixels, unsigned char* outputPixels, float outputPixelWidth){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x/outputPixelWidth && j < imageSize.y/outputPixelWidth){
float x = i*outputPixelWidth;
float y = j*outputPixelWidth;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
outputPixels[j*colorDepth*llroundf(imageSize.x/outputPixelWidth) + i*colorDepth + d] = (unsigned char) sumPix;
}
}
}
__global__ void jax::binImage(uint2 imageSize, unsigned int colorDepth, float* pixels, float* binnedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(x < imageSize.x/2 && y < imageSize.y/2){
for(int d = 0; d < colorDepth; ++d){
float sumPix = pixels[y*colorDepth*2*imageSize.x + x*2*colorDepth + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + x*2*colorDepth + d] +
pixels[y*2*colorDepth*imageSize.x + (x*2+1)*colorDepth + d] +
pixels[(y*2+1)*colorDepth*imageSize.x + (x*2+1)*colorDepth + d];
binnedImage[y*colorDepth*(imageSize.x/2) + x*colorDepth + d] = sumPix/4.0f;
}
}
}
__global__ void jax::upsampleImage(uint2 imageSize, unsigned int colorDepth, float* pixels, float* upsampledImage){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x*2 && j < imageSize.y*2){
float x = i*0.5f;
float y = j*0.5f;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
upsampledImage[j*colorDepth*(imageSize.x*2) + i*colorDepth + d] = sumPix;
}
}
}
__global__ void jax::bilinearInterpolation(uint2 imageSize, unsigned int colorDepth, float* pixels, float* outputPixels, float outputPixelWidth){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < imageSize.x/outputPixelWidth && j < imageSize.y/outputPixelWidth){
float x = i*outputPixelWidth;
float y = j*outputPixelWidth;
int xm = getSymmetrizedCoord((int)x,imageSize.x);
int xp = getSymmetrizedCoord((int)x + 1,imageSize.x);
int ym = getSymmetrizedCoord((int)y,imageSize.y);
int yp = getSymmetrizedCoord((int)y + 1,imageSize.y);
float2 interLocDiff = {x-floor(x),y-floor(y)};
for(int d = 0; d < colorDepth; ++d){
float sumPix = interLocDiff.x*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1.0f-interLocDiff.x)*interLocDiff.y*((float)pixels[yp*colorDepth*imageSize.x + xm*colorDepth + d]);
sumPix += interLocDiff.x*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xp*colorDepth + d]);
sumPix += (1-interLocDiff.x)*(1-interLocDiff.y)*((float)pixels[ym*colorDepth*imageSize.x + xm*colorDepth + d]);
outputPixels[j*colorDepth*llroundf(imageSize.x/outputPixelWidth) + i*colorDepth + d] = sumPix;
}
}
}
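// Plain convolution: pixels where the kernel would extend past the image are written as 0
// (zero border). The *_symmetric variants below instead mirror coordinates at the border
// via getSymmetrizedCoord, so every output pixel receives a full kernel sum.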
__global__ void jax::convolveImage(uint2 imageSize, unsigned char* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
if(x + (kernelSize.x/2) >= imageSize.x || x < kernelSize.x/2 || y + (kernelSize.y/2) >= imageSize.y || y < kernelSize.y/2){
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = 0;
}
else{
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
sum += ((float)pixels[((y+ky)*imageSize.x + (x+kx))*colorDepth + color])*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
}
__global__ void jax::convolveImage(uint2 imageSize, float* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
if(x + (kernelSize.x/2) >= imageSize.x || x < kernelSize.x/2 || y + (kernelSize.y/2) >= imageSize.y || y < kernelSize.y/2){
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = 0;
}
else{
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
sum += pixels[((y+ky)*imageSize.x + (x+kx))*colorDepth + color]*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
}
__global__ void jax::convolveImage_symmetric(uint2 imageSize, unsigned char* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
int2 symmetricCoord = {0,0};
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
symmetricCoord = {getSymmetrizedCoord(x+kx,(int)imageSize.x),getSymmetrizedCoord(y+ky,(int)imageSize.y)};
sum += ((float)pixels[((symmetricCoord.y)*imageSize.x + (symmetricCoord.x))*colorDepth + color])*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
__global__ void jax::convolveImage_symmetric(uint2 imageSize, float* pixels, unsigned int colorDepth, int2 kernelSize, float* kernel, float* convolvedImage){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int color = blockIdx.z*blockDim.z + threadIdx.z;
if(x < imageSize.x && y < imageSize.y){
int2 symmetricCoord = {0,0};
float sum = 0.0f;
for(int ky = -kernelSize.y/2; ky <= kernelSize.y/2; ++ky){
for(int kx = -kernelSize.x/2; kx <= kernelSize.x/2; ++kx){
symmetricCoord = {getSymmetrizedCoord(x+kx,(int)imageSize.x),getSymmetrizedCoord(y+ky,(int)imageSize.y)};
sum += pixels[((symmetricCoord.y)*imageSize.x + (symmetricCoord.x))*colorDepth + color]*kernel[(ky+(kernelSize.y/2))*kernelSize.x + (kx+(kernelSize.x/2))];
}
}
convolvedImage[(y*imageSize.x + x)*colorDepth + color] = sum;
}
}
__global__ void jax::convertToCharImage(unsigned int numPixels, unsigned char* pixels, float* fltPixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
pixels[globalID] = (unsigned char) (255.0f*fltPixels[globalID]);
}
}
__global__ void jax::convertToFltImage(unsigned int numPixels, unsigned char* pixels, float* fltPixels){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
fltPixels[globalID] = (float) pixels[globalID];
}
}
__global__ void jax::normalize(unsigned long numPixels, float* pixels, float2 minMax){
unsigned int globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < numPixels){
pixels[globalID] = (pixels[globalID] - minMax.x)/(minMax.y - minMax.x);
}
}
__global__ void jax::calculatePixelGradients(uint2 imageSize, unsigned char* pixels, int2* gradients){
unsigned long globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < imageSize.x*imageSize.y){
int2 loc = {(int)(globalID%imageSize.x),(int)(globalID/imageSize.x)};
int2 xContrib = {loc.x + 1,loc.x - 1};
int2 yContrib = {loc.y + 1,loc.y - 1};
if(xContrib.y == -1) xContrib = xContrib + 1;
else if(xContrib.x == imageSize.x) xContrib = xContrib - 1;
if(yContrib.y == -1) yContrib = yContrib + 1;
else if(yContrib.x == imageSize.y) yContrib = yContrib - 1;
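// Gradient is the two-pixel central difference f(x+1) - f(x-1) in each direction; at the
// image border the sample pair is shifted one pixel inward so both taps stay inside the image.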
gradients[globalID] = {
(int)pixels[loc.y*imageSize.x + xContrib.x] - (int)pixels[loc.y*imageSize.x + xContrib.y],
(int)pixels[yContrib.x*imageSize.x + loc.x] - (int)pixels[yContrib.y*imageSize.x + loc.x]
};
}
}
__global__ void jax::calculatePixelGradients(uint2 imageSize, float* pixels, float2* gradients){
unsigned long globalID = (blockIdx.y* gridDim.x+ blockIdx.x)*blockDim.x + threadIdx.x;
if(globalID < imageSize.x*imageSize.y){
int2 loc = {(int)(globalID%imageSize.x),(int)(globalID/imageSize.x)};
int2 xContrib = {loc.x + 1,loc.x - 1};
int2 yContrib = {loc.y + 1,loc.y - 1};
if(xContrib.y == -1) xContrib = xContrib + 1;
else if(xContrib.x == imageSize.x) xContrib = xContrib - 1;
if(yContrib.y == -1) yContrib = yContrib + 1;
else if(yContrib.x == imageSize.y) yContrib = yContrib - 1;
gradients[globalID] = {
pixels[loc.y*imageSize.x + xContrib.x] - pixels[loc.y*imageSize.x + xContrib.y],
pixels[yContrib.x*imageSize.x + loc.x] - pixels[yContrib.y*imageSize.x + loc.x]
};
}
}
|
e19100b4c48d36f5c2d807f82ad6ce48c042e2e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
#define HILOS 32
int main(int argc, char**argv) {
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
int ld = n;
double *A_h = (double*) malloc( sizeof(double)*n*n );
for (unsigned int i=0; i < n; i++)
{
for (unsigned int j=0; j < n; j++)
{
A_h[i*ld + j] = (rand()%100)/100.00;
}
}
double *B_h = (double*) malloc( sizeof(double)*n*n );
for (unsigned int i=0; i < n; i++)
{
for (unsigned int j=0; j < n; j++)
{
B_h[i*ld + j] = (rand()%100)/100.00;
}
}
double *C_h = (double*) malloc( sizeof(double)*n*n );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Matrix size = %u * %u\n", n,n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
double *A_d, *B_d, *C_d;
hipMalloc( (void **) &A_d, sizeof(double)*n*n);
hipMalloc( (void **) &B_d, sizeof(double)*n*n);
hipMalloc( (void **) &C_d, sizeof(double)*n*n);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d, A_h, sizeof(double)*n*n, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, sizeof(double)*n*n, hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
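// Launch configuration: HILOS x HILOS threads per block, with enough blocks in each
// dimension to cover an n x n matrix via ceiling division (n + HILOS - 1) / HILOS.
// E.g. n = 10000, HILOS = 32 -> 313 blocks per dimension (313 * 32 = 10016 >= 10000).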
dim3 num_threads(HILOS, HILOS);
dim3 num_blocks((int)((n + HILOS - 1) / num_threads.x),(int)((n + HILOS - 1) / num_threads.y));
hipLaunchKernelGGL(( vecAddKernelUnshared), dim3(num_blocks), dim3(num_threads), 0, 0, A_d, B_d, C_d, n);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(C_h, C_d, sizeof(double)*n*n, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| e19100b4c48d36f5c2d807f82ad6ce48c042e2e5.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
#define HILOS 32
int main(int argc, char**argv) {
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
int ld = n;
double *A_h = (double*) malloc( sizeof(double)*n*n );
for (unsigned int i=0; i < n; i++)
{
for (unsigned int j=0; j < n; j++)
{
A_h[i*ld + j] = (rand()%100)/100.00;
}
}
double *B_h = (double*) malloc( sizeof(double)*n*n );
for (unsigned int i=0; i < n; i++)
{
for (unsigned int j=0; j < n; j++)
{
B_h[i*ld + j] = (rand()%100)/100.00;
}
}
double *C_h = (double*) malloc( sizeof(double)*n*n );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Matrix size = %u * %u\n", n,n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
double *A_d, *B_d, *C_d;
cudaMalloc( (void **) &A_d, sizeof(double)*n*n);
cudaMalloc( (void **) &B_d, sizeof(double)*n*n);
cudaMalloc( (void **) &C_d, sizeof(double)*n*n);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d, A_h, sizeof(double)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, sizeof(double)*n*n, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
dim3 num_threads(HILOS, HILOS);
dim3 num_blocks((int)((n + HILOS - 1) / num_threads.x),(int)((n + HILOS - 1) / num_threads.y));
vecAddKernelUnshared<<<num_blocks, num_threads>>>(A_d, B_d, C_d, n);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(C_h, C_d, sizeof(double)*n*n, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
68870961a55b20cd70df4959332f46858a4d9043.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file GeoTrackView.test.cu
//---------------------------------------------------------------------------//
#include "geometry/GeoTrackView.hh"
#include <thrust/device_vector.h>
#include "base/KernelParamCalculator.cuda.hh"
#include "Geo.test.hh"
using thrust::raw_pointer_cast;
using namespace celeritas;
namespace celeritas_test
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void vgg_test_kernel(const GeoParamsCRefDevice params,
const GeoStateRefDevice state,
const VGGTestInit* start,
const int max_segments,
VolumeId* ids,
double* distances)
{
CELER_EXPECT(params && state);
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= state.size())
return;
GeoTrackView geo(params, state, tid);
geo = start[tid.get()];
for (int seg = 0; seg < max_segments; ++seg)
{
if (geo.is_outside())
break;
// Move next step
real_type dist = geo.move_next_step();
// Save current ID and distance to travel
ids[tid.get() * max_segments + seg] = geo.volume_id();
distances[tid.get() * max_segments + seg] = dist;
}
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
VGGTestOutput vgg_test(VGGTestInput input)
{
CELER_EXPECT(input.params);
CELER_EXPECT(input.state);
CELER_EXPECT(input.init.size() == input.state.size());
CELER_EXPECT(input.max_segments > 0);
// Temporary device data for kernel
thrust::device_vector<VGGTestInit> init(input.init.begin(),
input.init.end());
thrust::device_vector<VolumeId> ids(input.init.size() * input.max_segments);
thrust::device_vector<double> distances(ids.size(), -1.0);
// Run kernel
static const celeritas::KernelParamCalculator calc_launch_params(
vgg_test_kernel, "vgg_test");
auto params = calc_launch_params(init.size());
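// calc_launch_params(init.size()) picks a grid/block pair large enough to cover
// init.size() threads for this kernel; the tid >= state.size() check inside
// vgg_test_kernel handles any overshoot from rounding the grid up.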
hipLaunchKernelGGL(( vgg_test_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0,
input.params,
input.state,
raw_pointer_cast(init.data()),
input.max_segments,
raw_pointer_cast(ids.data()),
raw_pointer_cast(distances.data()));
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(hipDeviceSynchronize());
// Copy result back to CPU
VGGTestOutput result;
for (auto id : thrust::host_vector<VolumeId>(ids))
{
result.ids.push_back(id ? static_cast<int>(id.get()) : -1);
}
result.distances.resize(distances.size());
thrust::copy(distances.begin(), distances.end(), result.distances.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
| 68870961a55b20cd70df4959332f46858a4d9043.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file GeoTrackView.test.cu
//---------------------------------------------------------------------------//
#include "geometry/GeoTrackView.hh"
#include <thrust/device_vector.h>
#include "base/KernelParamCalculator.cuda.hh"
#include "Geo.test.hh"
using thrust::raw_pointer_cast;
using namespace celeritas;
namespace celeritas_test
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void vgg_test_kernel(const GeoParamsCRefDevice params,
const GeoStateRefDevice state,
const VGGTestInit* start,
const int max_segments,
VolumeId* ids,
double* distances)
{
CELER_EXPECT(params && state);
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= state.size())
return;
GeoTrackView geo(params, state, tid);
geo = start[tid.get()];
for (int seg = 0; seg < max_segments; ++seg)
{
if (geo.is_outside())
break;
// Move next step
real_type dist = geo.move_next_step();
// Save current ID and distance to travel
ids[tid.get() * max_segments + seg] = geo.volume_id();
distances[tid.get() * max_segments + seg] = dist;
}
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
VGGTestOutput vgg_test(VGGTestInput input)
{
CELER_EXPECT(input.params);
CELER_EXPECT(input.state);
CELER_EXPECT(input.init.size() == input.state.size());
CELER_EXPECT(input.max_segments > 0);
// Temporary device data for kernel
thrust::device_vector<VGGTestInit> init(input.init.begin(),
input.init.end());
thrust::device_vector<VolumeId> ids(input.init.size() * input.max_segments);
thrust::device_vector<double> distances(ids.size(), -1.0);
// Run kernel
static const celeritas::KernelParamCalculator calc_launch_params(
vgg_test_kernel, "vgg_test");
auto params = calc_launch_params(init.size());
vgg_test_kernel<<<params.grid_size, params.block_size>>>(
input.params,
input.state,
raw_pointer_cast(init.data()),
input.max_segments,
raw_pointer_cast(ids.data()),
raw_pointer_cast(distances.data()));
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(cudaDeviceSynchronize());
// Copy result back to CPU
VGGTestOutput result;
for (auto id : thrust::host_vector<VolumeId>(ids))
{
result.ids.push_back(id ? static_cast<int>(id.get()) : -1);
}
result.distances.resize(distances.size());
thrust::copy(distances.begin(), distances.end(), result.distances.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
|
e480adce6c6eda5de428218a50e63243ca29ef42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h" /* Private declarations for DCT subsystem */
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include<sys/time.h>
#ifdef DCT_ISLOW_SUPPORTED
/*
* This module is specialized to the case DCTSIZE = 8.
*/
#if DCTSIZE != 8
Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
#endif
/*
* The poop on this scaling stuff is as follows:
*
* Each 1-D DCT step produces outputs which are a factor of sqrt(N)
* larger than the true DCT outputs. The final outputs are therefore
* a factor of N larger than desired; since N=8 this can be cured by
* a simple right shift at the end of the algorithm. The advantage of
* this arrangement is that we save two multiplications per 1-D DCT,
* because the y0 and y4 outputs need not be divided by sqrt(N).
* In the IJG code, this factor of 8 is removed by the quantization step
* (in jcdctmgr.c), NOT in this module.
*
* We have to do addition and subtraction of the integer inputs, which
* is no problem, and multiplication by fractional constants, which is
* a problem to do in integer arithmetic. We multiply all the constants
* by CONST_SCALE and convert them to integer constants (thus retaining
* CONST_BITS bits of precision in the constants). After doing a
* multiplication we have to divide the product by CONST_SCALE, with proper
* rounding, to produce the correct output. This division can be done
* cheaply as a right shift of CONST_BITS bits. We postpone shifting
* as long as possible so that partial sums can be added together with
* full fractional precision.
*
* The outputs of the first pass are scaled up by PASS1_BITS bits so that
* they are represented to better-than-integral precision. These outputs
* require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
* with the recommended scaling. (For 12-bit sample data, the intermediate
* array is INT32 anyway.)
*
* To avoid overflow of the 32-bit intermediate results in pass 2, we must
* have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
* shows that the values given below are the most effective.
*/
#if BITS_IN_JSAMPLE == 8
#define CONST_BITS 13
#define PASS1_BITS 2
#else
#define CONST_BITS 13
#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
#endif
/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
* causing a lot of useless floating-point operations at run time.
* To get around this we use the following pre-calculated constants.
* If you change CONST_BITS you may want to add appropriate values.
* (With a reasonable C compiler, you can just rely on the FIX() macro...)
*/
#if CONST_BITS == 13
#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
#else
#define FIX_0_298631336 FIX(0.298631336)
#define FIX_0_390180644 FIX(0.390180644)
#define FIX_0_541196100 FIX(0.541196100)
#define FIX_0_765366865 FIX(0.765366865)
#define FIX_0_899976223 FIX(0.899976223)
#define FIX_1_175875602 FIX(1.175875602)
#define FIX_1_501321110 FIX(1.501321110)
#define FIX_1_847759065 FIX(1.847759065)
#define FIX_1_961570560 FIX(1.961570560)
#define FIX_2_053119869 FIX(2.053119869)
#define FIX_2_562915447 FIX(2.562915447)
#define FIX_3_072711026 FIX(3.072711026)
#endif
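/* As a concrete check of the table above: with CONST_BITS = 13, CONST_SCALE = 2^13 = 8192,
 * so FIX(0.541196100) = round(0.541196100 * 8192) = 4433 and
 * FIX(1.847759065) = round(1.847759065 * 8192) = 15137, matching FIX_0_541196100 and
 * FIX_1_847759065 above. DESCALE(x,n) (defined in jdct.h) then undoes the scaling with a
 * rounding right shift, roughly (x + (1 << (n-1))) >> n.
 */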
#ifdef DCT_SCALING_SUPPORTED
/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
* For 8-bit samples with the recommended scaling, all the variable
* and constant values involved are no more than 16 bits wide, so a
* 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
* For 12-bit samples, a full 32-bit multiplication will be needed.
*/
#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const) ((var) * (const))
#endif
struct timeval start,end;
int *devicedataptr,*deviceelem,*devicedata;
DCTELEM *dataptr,*elem;
DCTELEM *workspace;
int threadnumber = 16;
int blocknumber = 1;
__global__ void jpeg_fdct_16x16_row (DCTELEM *elemptr_local, int *dataptr_local,DCTELEM *workspace)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
INT32 tmp[18];
/* Pass 1: process rows.
* Note results are scaled up by sqrt(8) compared to a true DCT;
* furthermore, we scale the results by 2**PASS1_BITS.
* cK represents sqrt(2) * cos(K*pi/32).
*/
elemptr_local += ID*16;
if(ID >= 8)
dataptr_local=workspace+(ID-8)*8;
else
dataptr_local+=ID*8;
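// One thread per row of the 16x16 input block: threads 0-7 write their 8 output
// coefficients into dataptr_local, threads 8-15 into the extended workspace, mirroring
// the serial libjpeg jpeg_fdct_16x16 row loop, which switches to the workspace after the
// first 8 rows.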
/* Even part */
for(int i = 0;i<8;i++){
tmp[i] = elemptr_local[i]+elemptr_local[15-i];
}
for( int i =0; i<4;i++){
tmp[i+10] = tmp[i]+tmp[7-i];
tmp[i+14] = tmp[i]-tmp[7-i];
}
for(int i =0;i<8;i++){
tmp[i] = elemptr_local[i]-elemptr_local[15-i];
}
/* Apply unsigned->signed conversion */
dataptr_local[0] = (DCTELEM)
((tmp[10] + tmp[11] + tmp[12] + tmp[13] - 16 * CENTERJSAMPLE) << PASS1_BITS);
dataptr_local[4] = (DCTELEM)
DESCALE(MULTIPLY(tmp[10] - tmp[13], FIX(1.306562965)) + /* c4[16] = c2[8] */
MULTIPLY(tmp[11] - tmp[12], FIX_0_541196100), /* c12[16] = c6[8] */
CONST_BITS-PASS1_BITS);
tmp[10] = MULTIPLY(tmp[17] - tmp[15], FIX(0.275899379)) + /* c14[16] = c7[8] */
MULTIPLY(tmp[14] - tmp[16], FIX(1.387039845)); /* c2[16] = c1[8] */
dataptr_local[2] = (DCTELEM)
DESCALE(tmp[10] + MULTIPLY(tmp[15], FIX(1.451774982)) /* c6+c14 */
+ MULTIPLY(tmp[16], FIX(2.172734804)), /* c2+c10 */
CONST_BITS-PASS1_BITS);
dataptr_local[6] = (DCTELEM)
DESCALE(tmp[10] - MULTIPLY(tmp[14], FIX(0.211164243)) /* c2-c6 */
- MULTIPLY(tmp[17], FIX(1.061594338)), /* c10+c14 */
CONST_BITS-PASS1_BITS);
/* Odd part */
tmp[11] = MULTIPLY(tmp[0] + tmp[1], FIX(1.353318001)) + /* c3 */
MULTIPLY(tmp[6] - tmp[7], FIX(0.410524528)); /* c13 */
tmp[12] = MULTIPLY(tmp[0] + tmp[2], FIX(1.247225013)) + /* c5 */
MULTIPLY(tmp[5] + tmp[7], FIX(0.666655658)); /* c11 */
tmp[13] = MULTIPLY(tmp[0] + tmp[3], FIX(1.093201867)) + /* c7 */
MULTIPLY(tmp[4] - tmp[7], FIX(0.897167586)); /* c9 */
tmp[14] = MULTIPLY(tmp[1] + tmp[2], FIX(0.138617169)) + /* c15 */
MULTIPLY(tmp[6] - tmp[5], FIX(1.407403738)); /* c1 */
tmp[15] = MULTIPLY(tmp[1] + tmp[3], - FIX(0.666655658)) + /* -c11 */
MULTIPLY(tmp[4] + tmp[6], - FIX(1.247225013)); /* -c5 */
tmp[16] = MULTIPLY(tmp[2] + tmp[3], - FIX(1.353318001)) + /* -c3 */
MULTIPLY(tmp[5] - tmp[4], FIX(0.410524528)); /* c13 */
tmp[10] = tmp[11] + tmp[12] + tmp[13] -
MULTIPLY(tmp[0], FIX(2.286341144)) + /* c7+c5+c3-c1 */
MULTIPLY(tmp[7], FIX(0.779653625)); /* c15+c13-c11+c9 */
tmp[11] += tmp[14] + tmp[15] + MULTIPLY(tmp[1], FIX(0.071888074)) /* c9-c3-c15+c11 */
- MULTIPLY(tmp[6], FIX(1.663905119)); /* c7+c13+c1-c5 */
tmp[12] += tmp[14] + tmp[16] - MULTIPLY(tmp[2], FIX(1.125726048)) /* c7+c5+c15-c3 */
+ MULTIPLY(tmp[5], FIX(1.227391138)); /* c9-c11+c1-c13 */
tmp[13] += tmp[15] + tmp[16] + MULTIPLY(tmp[3], FIX(1.065388962)) /* c15+c3+c11-c7 */
+ MULTIPLY(tmp[4], FIX(2.167985692)); /* c1+c13+c5-c9 */
dataptr_local[1] = (DCTELEM) DESCALE(tmp[10], CONST_BITS-PASS1_BITS);
dataptr_local[3] = (DCTELEM) DESCALE(tmp[11], CONST_BITS-PASS1_BITS);
dataptr_local[5] = (DCTELEM) DESCALE(tmp[12], CONST_BITS-PASS1_BITS);
dataptr_local[7] = (DCTELEM) DESCALE(tmp[13], CONST_BITS-PASS1_BITS);
//if(ID%8==7 && ((ID+1)/8)%2==1)
// dataptr_local = workspace; /* switch pointer to extended workspace */
}
__global__ void jpeg_fdct_16x16_col(int *dataptr_local, DCTELEM *workspace)
{
/* Pass 2: process columns.
* We remove the PASS1_BITS s
* caling, but leave the results scaled up
* by an overall factor of 8.
* We must also scale the output by (8/16)**2 = 1/2**2.
* cK represents sqrt(2) * cos(K*pi/32).
*/
int ID = blockIdx.x * blockDim.x + threadIdx.x;
INT32 tmp[18];
DCTELEM *wsptr;
if (ID >= DCTSIZE) return; /* only DCTSIZE (8) output columns; extra threads would read past the 8x8 workspace */
dataptr_local += ID;
wsptr = workspace + ID;
/* Even part */
for(int i=0;i<8;i++){
tmp[i] = dataptr_local[DCTSIZE*i]+ wsptr[DCTSIZE*(7-i)];
}
for(int i=0;i<4;i++){
tmp[i+10] = tmp[i]+tmp[7-i];
tmp[i+14] = tmp[i]-tmp[7-i];
}
for(int i=0;i<8;i++)
tmp[i] = dataptr_local[DCTSIZE*i] - wsptr[DCTSIZE*(7-i)];
dataptr_local[DCTSIZE*0] = (DCTELEM)
DESCALE(tmp[10] + tmp[11] + tmp[12] + tmp[13], PASS1_BITS+2);
dataptr_local[DCTSIZE*4] = (DCTELEM)
DESCALE(MULTIPLY(tmp[10] - tmp[13], FIX(1.306562965)) + /* c4[16] = c2[8] */
MULTIPLY(tmp[11] - tmp[12], FIX_0_541196100), /* c12[16] = c6[8] */
CONST_BITS+PASS1_BITS+2);
tmp[10] = MULTIPLY(tmp[17] - tmp[15], FIX(0.275899379)) + /* c14[16] = c7[8] */
MULTIPLY(tmp[14] - tmp[16], FIX(1.387039845)); /* c2[16] = c1[8] */
dataptr_local[DCTSIZE*2] = (DCTELEM)
DESCALE(tmp[10] + MULTIPLY(tmp[15], FIX(1.451774982)) /* c6+c14 */
+ MULTIPLY(tmp[16], FIX(2.172734804)), /* c2+c10 */
CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*6] = (DCTELEM)
DESCALE(tmp[10] - MULTIPLY(tmp[14], FIX(0.211164243)) /* c2-c6 */
- MULTIPLY(tmp[17], FIX(1.061594338)), /* c10+c14 */
CONST_BITS+PASS1_BITS+2);
/* Odd part */
tmp[11] = MULTIPLY(tmp[0]+ tmp[1], FIX(1.353318001)) + /* c3 */
MULTIPLY(tmp[6] - tmp[7], FIX(0.410524528)); /* c13 */
tmp[12] = MULTIPLY(tmp[0] + tmp[2], FIX(1.247225013)) + /* c5 */
MULTIPLY(tmp[5] + tmp[7], FIX(0.666655658)); /* c11 */
tmp[13] = MULTIPLY(tmp[0] + tmp[3], FIX(1.093201867)) + /* c7 */
MULTIPLY(tmp[4] - tmp[7], FIX(0.897167586)); /* c9 */
tmp[14] = MULTIPLY(tmp[1] + tmp[2], FIX(0.138617169)) + /* c15 */
MULTIPLY(tmp[6] - tmp[5], FIX(1.407403738)); /* c1 */
tmp[15] = MULTIPLY(tmp[1] + tmp[3], - FIX(0.666655658)) + /* -c11 */
MULTIPLY(tmp[4] + tmp[6], - FIX(1.247225013)); /* -c5 */
tmp[16] = MULTIPLY(tmp[2] + tmp[3], - FIX(1.353318001)) + /* -c3 */
MULTIPLY(tmp[5] - tmp[4], FIX(0.410524528)); /* c13 */
tmp[10] = tmp[11] + tmp[12] + tmp[13] -
MULTIPLY(tmp[0], FIX(2.286341144)) + /* c7+c5+c3-c1 */
MULTIPLY(tmp[7], FIX(0.779653625)); /* c15+c13-c11+c9 */
tmp[11] += tmp[14] + tmp[15] + MULTIPLY(tmp[1], FIX(0.071888074)) /* c9-c3-c15+c11 */
- MULTIPLY(tmp[6], FIX(1.663905119)); /* c7+c13+c1-c5 */
tmp[12] += tmp[14] + tmp[16] - MULTIPLY(tmp[2], FIX(1.125726048)) /* c7+c5+c15-c3 */
+ MULTIPLY(tmp[5], FIX(1.227391138)); /* c9-c11+c1-c13 */
tmp[13] += tmp[15] + tmp[16] + MULTIPLY(tmp[3], FIX(1.065388962)) /* c15+c3+c11-c7 */
+ MULTIPLY(tmp[4], FIX(2.167985692)); /* c1+c13+c5-c9 */
dataptr_local[DCTSIZE*1] = (DCTELEM) DESCALE(tmp[10], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*3] = (DCTELEM) DESCALE(tmp[11], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*5] = (DCTELEM) DESCALE(tmp[12], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*7] = (DCTELEM) DESCALE(tmp[13], CONST_BITS+PASS1_BITS+2);
}
int main()
{
FILE *sc,*fds;
int *elem_start, *dataptr_start;
int iter = 0;
char check;
unsigned int col;
double time_used = 0.0;
fds = fopen("sample_data_l.txt","r");
sc = fopen("start_col_l.txt","r");
if(fds==NULL || sc ==NULL){
printf("open error\n");
return 0;
}
do{
fscanf(sc,"%u",&col);
check = fgetc(sc);
iter++;
}while(check!=EOF);
fclose(sc);
printf("%d\n",iter);
dataptr = (int*) malloc(iter*256*sizeof(int));
elem= (int*) malloc(iter*256*sizeof(int));
for(int i=0;i<iter*256;i++){
fscanf(fds, "%d",&elem[i]);
}
fclose(fds);
hipMalloc(&devicedataptr,16*16*sizeof(int));
hipMalloc(&deviceelem,16*16*sizeof(int));
hipMalloc(&workspace,64*sizeof(int));
dataptr_start = dataptr;
elem_start = elem;
for(int i=0;i<iter;i++){
gettimeofday(&start,NULL);
hipMemcpy(deviceelem, elem ,256*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( jpeg_fdct_16x16_row), dim3(blocknumber),dim3(threadnumber), 0, 0, deviceelem,devicedataptr,workspace);
hipLaunchKernelGGL(( jpeg_fdct_16x16_col), dim3(blocknumber),dim3(threadnumber), 0, 0, devicedataptr,workspace);
hipMemcpy(dataptr, devicedataptr, 256*sizeof(int), hipMemcpyDeviceToHost);
gettimeofday(&end,NULL);
elem += 256;
dataptr += 256;
time_used += (double)(end.tv_sec-start.tv_sec);
time_used += (double)(end.tv_usec-start.tv_usec)/1000000;
}
fprintf(stderr,"time used:%lfs\n", time_used);
/*for(int ctr = 0;ctr<iter;ctr++){
if(ctr >21246&&ctr<22000){
for(int i=0;i<57;i++){
fprintf(stderr,"%d ",(int)dataptr_start[i]);
}
printf("%d\n",ctr);
}
dataptr_start+=256;
}
*/
return 0;
}
#endif /* DCT_SCALING_SUPPORTED */
#endif /* DCT_ISLOW_SUPPORTED */
| e480adce6c6eda5de428218a50e63243ca29ef42.cu | #define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h" /* Private declarations for DCT subsystem */
#include <fcntl.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include<sys/time.h>
#ifdef DCT_ISLOW_SUPPORTED
/*
* This module is specialized to the case DCTSIZE = 8.
*/
#if DCTSIZE != 8
Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
#endif
/*
* The poop on this scaling stuff is as follows:
*
* Each 1-D DCT step produces outputs which are a factor of sqrt(N)
* larger than the true DCT outputs. The final outputs are therefore
* a factor of N larger than desired; since N=8 this can be cured by
* a simple right shift at the end of the algorithm. The advantage of
* this arrangement is that we save two multiplications per 1-D DCT,
* because the y0 and y4 outputs need not be divided by sqrt(N).
* In the IJG code, this factor of 8 is removed by the quantization step
* (in jcdctmgr.c), NOT in this module.
*
* We have to do addition and subtraction of the integer inputs, which
* is no problem, and multiplication by fractional constants, which is
* a problem to do in integer arithmetic. We multiply all the constants
* by CONST_SCALE and convert them to integer constants (thus retaining
* CONST_BITS bits of precision in the constants). After doing a
* multiplication we have to divide the product by CONST_SCALE, with proper
* rounding, to produce the correct output. This division can be done
* cheaply as a right shift of CONST_BITS bits. We postpone shifting
* as long as possible so that partial sums can be added together with
* full fractional precision.
*
* The outputs of the first pass are scaled up by PASS1_BITS bits so that
* they are represented to better-than-integral precision. These outputs
* require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
* with the recommended scaling. (For 12-bit sample data, the intermediate
* array is INT32 anyway.)
*
* To avoid overflow of the 32-bit intermediate results in pass 2, we must
* have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
* shows that the values given below are the most effective.
*/
#if BITS_IN_JSAMPLE == 8
#define CONST_BITS 13
#define PASS1_BITS 2
#else
#define CONST_BITS 13
#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
#endif
/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
* causing a lot of useless floating-point operations at run time.
* To get around this we use the following pre-calculated constants.
* If you change CONST_BITS you may want to add appropriate values.
* (With a reasonable C compiler, you can just rely on the FIX() macro...)
*/
#if CONST_BITS == 13
#define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
#define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
#define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
#define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
#define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
#else
#define FIX_0_298631336 FIX(0.298631336)
#define FIX_0_390180644 FIX(0.390180644)
#define FIX_0_541196100 FIX(0.541196100)
#define FIX_0_765366865 FIX(0.765366865)
#define FIX_0_899976223 FIX(0.899976223)
#define FIX_1_175875602 FIX(1.175875602)
#define FIX_1_501321110 FIX(1.501321110)
#define FIX_1_847759065 FIX(1.847759065)
#define FIX_1_961570560 FIX(1.961570560)
#define FIX_2_053119869 FIX(2.053119869)
#define FIX_2_562915447 FIX(2.562915447)
#define FIX_3_072711026 FIX(3.072711026)
#endif
#ifdef DCT_SCALING_SUPPORTED
/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
* For 8-bit samples with the recommended scaling, all the variable
* and constant values involved are no more than 16 bits wide, so a
* 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
* For 12-bit samples, a full 32-bit multiplication will be needed.
*/
#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const) ((var) * (const))
#endif
struct timeval start,end;
int *devicedataptr,*deviceelem,*devicedata;
DCTELEM *dataptr,*elem;
DCTELEM *workspace;
int threadnumber = 16;
int blocknumber = 1;
__global__ void jpeg_fdct_16x16_row (DCTELEM *elemptr_local, int *dataptr_local,DCTELEM *workspace)
{
int ID = blockIdx.x * blockDim.x + threadIdx.x;
INT32 tmp[18];
/* Pass 1: process rows.
* Note results are scaled up by sqrt(8) compared to a true DCT;
* furthermore, we scale the results by 2**PASS1_BITS.
* cK represents sqrt(2) * cos(K*pi/32).
*/
elemptr_local += ID*16;
if(ID >= 8)
dataptr_local=workspace+(ID-8)*8;
else
dataptr_local+=ID*8;
/* Even part */
for(int i = 0;i<8;i++){
tmp[i] = elemptr_local[i]+elemptr_local[15-i];
}
for( int i =0; i<4;i++){
tmp[i+10] = tmp[i]+tmp[7-i];
tmp[i+14] = tmp[i]-tmp[7-i];
}
for(int i =0;i<8;i++){
tmp[i] = elemptr_local[i]-elemptr_local[15-i];
}
/* Apply unsigned->signed conversion */
dataptr_local[0] = (DCTELEM)
((tmp[10] + tmp[11] + tmp[12] + tmp[13] - 16 * CENTERJSAMPLE) << PASS1_BITS);
dataptr_local[4] = (DCTELEM)
DESCALE(MULTIPLY(tmp[10] - tmp[13], FIX(1.306562965)) + /* c4[16] = c2[8] */
MULTIPLY(tmp[11] - tmp[12], FIX_0_541196100), /* c12[16] = c6[8] */
CONST_BITS-PASS1_BITS);
tmp[10] = MULTIPLY(tmp[17] - tmp[15], FIX(0.275899379)) + /* c14[16] = c7[8] */
MULTIPLY(tmp[14] - tmp[16], FIX(1.387039845)); /* c2[16] = c1[8] */
dataptr_local[2] = (DCTELEM)
DESCALE(tmp[10] + MULTIPLY(tmp[15], FIX(1.451774982)) /* c6+c14 */
+ MULTIPLY(tmp[16], FIX(2.172734804)), /* c2+c10 */
CONST_BITS-PASS1_BITS);
dataptr_local[6] = (DCTELEM)
DESCALE(tmp[10] - MULTIPLY(tmp[14], FIX(0.211164243)) /* c2-c6 */
- MULTIPLY(tmp[17], FIX(1.061594338)), /* c10+c14 */
CONST_BITS-PASS1_BITS);
/* Odd part */
tmp[11] = MULTIPLY(tmp[0] + tmp[1], FIX(1.353318001)) + /* c3 */
MULTIPLY(tmp[6] - tmp[7], FIX(0.410524528)); /* c13 */
tmp[12] = MULTIPLY(tmp[0] + tmp[2], FIX(1.247225013)) + /* c5 */
MULTIPLY(tmp[5] + tmp[7], FIX(0.666655658)); /* c11 */
tmp[13] = MULTIPLY(tmp[0] + tmp[3], FIX(1.093201867)) + /* c7 */
MULTIPLY(tmp[4] - tmp[7], FIX(0.897167586)); /* c9 */
tmp[14] = MULTIPLY(tmp[1] + tmp[2], FIX(0.138617169)) + /* c15 */
MULTIPLY(tmp[6] - tmp[5], FIX(1.407403738)); /* c1 */
tmp[15] = MULTIPLY(tmp[1] + tmp[3], - FIX(0.666655658)) + /* -c11 */
MULTIPLY(tmp[4] + tmp[6], - FIX(1.247225013)); /* -c5 */
tmp[16] = MULTIPLY(tmp[2] + tmp[3], - FIX(1.353318001)) + /* -c3 */
MULTIPLY(tmp[5] - tmp[4], FIX(0.410524528)); /* c13 */
tmp[10] = tmp[11] + tmp[12] + tmp[13] -
MULTIPLY(tmp[0], FIX(2.286341144)) + /* c7+c5+c3-c1 */
MULTIPLY(tmp[7], FIX(0.779653625)); /* c15+c13-c11+c9 */
tmp[11] += tmp[14] + tmp[15] + MULTIPLY(tmp[1], FIX(0.071888074)) /* c9-c3-c15+c11 */
- MULTIPLY(tmp[6], FIX(1.663905119)); /* c7+c13+c1-c5 */
tmp[12] += tmp[14] + tmp[16] - MULTIPLY(tmp[2], FIX(1.125726048)) /* c7+c5+c15-c3 */
+ MULTIPLY(tmp[5], FIX(1.227391138)); /* c9-c11+c1-c13 */
tmp[13] += tmp[15] + tmp[16] + MULTIPLY(tmp[3], FIX(1.065388962)) /* c15+c3+c11-c7 */
+ MULTIPLY(tmp[4], FIX(2.167985692)); /* c1+c13+c5-c9 */
dataptr_local[1] = (DCTELEM) DESCALE(tmp[10], CONST_BITS-PASS1_BITS);
dataptr_local[3] = (DCTELEM) DESCALE(tmp[11], CONST_BITS-PASS1_BITS);
dataptr_local[5] = (DCTELEM) DESCALE(tmp[12], CONST_BITS-PASS1_BITS);
dataptr_local[7] = (DCTELEM) DESCALE(tmp[13], CONST_BITS-PASS1_BITS);
//if(ID%8==7 && ((ID+1)/8)%2==1)
// dataptr_local = workspace; /* switch pointer to extended workspace */
}
__global__ void jpeg_fdct_16x16_col(int *dataptr_local, DCTELEM *workspace)
{
/* Pass 2: process columns.
 * We remove the PASS1_BITS scaling, but leave the results scaled up
* by an overall factor of 8.
* We must also scale the output by (8/16)**2 = 1/2**2.
* cK represents sqrt(2) * cos(K*pi/32).
*/
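  /* Descale accounting (added note, assuming DESCALE(x,n) is a rounded right shift by
   * n bits): CONST_BITS removes the fixed-point scaling of the constants, PASS1_BITS
   * removes the extra 2**PASS1_BITS from pass 1, and the trailing +2 applies the
   * (8/16)**2 = 1/4 output scaling mentioned above.
   */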
int ID = blockIdx.x * blockDim.x + threadIdx.x;
INT32 tmp[18];
DCTELEM *wsptr;
dataptr_local += ID;
wsptr = workspace + ID;
/* Even part */
for(int i=0;i<8;i++){
tmp[i] = dataptr_local[DCTSIZE*i]+ wsptr[DCTSIZE*(7-i)];
}
for(int i=0;i<4;i++){
tmp[i+10] = tmp[i]+tmp[7-i];
tmp[i+14] = tmp[i]-tmp[7-i];
}
for(int i=0;i<8;i++)
tmp[i] = dataptr_local[DCTSIZE*i] - wsptr[DCTSIZE*(7-i)];
dataptr_local[DCTSIZE*0] = (DCTELEM)
DESCALE(tmp[10] + tmp[11] + tmp[12] + tmp[13], PASS1_BITS+2);
dataptr_local[DCTSIZE*4] = (DCTELEM)
DESCALE(MULTIPLY(tmp[10] - tmp[13], FIX(1.306562965)) + /* c4[16] = c2[8] */
MULTIPLY(tmp[11] - tmp[12], FIX_0_541196100), /* c12[16] = c6[8] */
CONST_BITS+PASS1_BITS+2);
tmp[10] = MULTIPLY(tmp[17] - tmp[15], FIX(0.275899379)) + /* c14[16] = c7[8] */
MULTIPLY(tmp[14] - tmp[16], FIX(1.387039845)); /* c2[16] = c1[8] */
dataptr_local[DCTSIZE*2] = (DCTELEM)
DESCALE(tmp[10] + MULTIPLY(tmp[15], FIX(1.451774982)) /* c6+c14 */
          + MULTIPLY(tmp[16], FIX(2.172734804)), /* c2+c10 */
CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*6] = (DCTELEM)
DESCALE(tmp[10] - MULTIPLY(tmp[14], FIX(0.211164243)) /* c2-c6 */
- MULTIPLY(tmp[17], FIX(1.061594338)), /* c10+c14 */
CONST_BITS+PASS1_BITS+2);
/* Odd part */
tmp[11] = MULTIPLY(tmp[0]+ tmp[1], FIX(1.353318001)) + /* c3 */
MULTIPLY(tmp[6] - tmp[7], FIX(0.410524528)); /* c13 */
tmp[12] = MULTIPLY(tmp[0] + tmp[2], FIX(1.247225013)) + /* c5 */
MULTIPLY(tmp[5] + tmp[7], FIX(0.666655658)); /* c11 */
tmp[13] = MULTIPLY(tmp[0] + tmp[3], FIX(1.093201867)) + /* c7 */
MULTIPLY(tmp[4] - tmp[7], FIX(0.897167586)); /* c9 */
tmp[14] = MULTIPLY(tmp[1] + tmp[2], FIX(0.138617169)) + /* c15 */
MULTIPLY(tmp[6] - tmp[5], FIX(1.407403738)); /* c1 */
tmp[15] = MULTIPLY(tmp[1] + tmp[3], - FIX(0.666655658)) + /* -c11 */
MULTIPLY(tmp[4] + tmp[6], - FIX(1.247225013)); /* -c5 */
tmp[16] = MULTIPLY(tmp[2] + tmp[3], - FIX(1.353318001)) + /* -c3 */
MULTIPLY(tmp[5] - tmp[4], FIX(0.410524528)); /* c13 */
tmp[10] = tmp[11] + tmp[12] + tmp[13] -
MULTIPLY(tmp[0], FIX(2.286341144)) + /* c7+c5+c3-c1 */
MULTIPLY(tmp[7], FIX(0.779653625)); /* c15+c13-c11+c9 */
tmp[11] += tmp[14] + tmp[15] + MULTIPLY(tmp[1], FIX(0.071888074)) /* c9-c3-c15+c11 */
- MULTIPLY(tmp[6], FIX(1.663905119)); /* c7+c13+c1-c5 */
tmp[12] += tmp[14] + tmp[16] - MULTIPLY(tmp[2], FIX(1.125726048)) /* c7+c5+c15-c3 */
+ MULTIPLY(tmp[5], FIX(1.227391138)); /* c9-c11+c1-c13 */
tmp[13] += tmp[15] + tmp[16] + MULTIPLY(tmp[3], FIX(1.065388962)) /* c15+c3+c11-c7 */
+ MULTIPLY(tmp[4], FIX(2.167985692)); /* c1+c13+c5-c9 */
dataptr_local[DCTSIZE*1] = (DCTELEM) DESCALE(tmp[10], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*3] = (DCTELEM) DESCALE(tmp[11], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*5] = (DCTELEM) DESCALE(tmp[12], CONST_BITS+PASS1_BITS+2);
dataptr_local[DCTSIZE*7] = (DCTELEM) DESCALE(tmp[13], CONST_BITS+PASS1_BITS+2);
}
int main()
{
FILE *sc,*fds;
int *elem_start, *dataptr_start;
int iter = 0;
  int check; /* fgetc() returns int, so compare against EOF as int */
unsigned int col;
double time_used = 0.0;
fds = fopen("sample_data_l.txt","r");
sc = fopen("start_col_l.txt","r");
if(fds==NULL || sc ==NULL){
printf("open error\n");
return 0;
}
do{
fscanf(sc,"%u",&col);
check = fgetc(sc);
iter++;
}while(check!=EOF);
fclose(sc);
printf("%d\n",iter);
dataptr = (int*) malloc(iter*256*sizeof(int));
elem= (int*) malloc(iter*256*sizeof(int));
for(int i=0;i<iter*256;i++){
fscanf(fds, "%d",&elem[i]);
}
fclose(fds);
cudaMalloc(&devicedataptr,16*16*sizeof(int));
cudaMalloc(&deviceelem,16*16*sizeof(int));
cudaMalloc(&workspace,64*sizeof(int));
dataptr_start = dataptr;
elem_start = elem;
for(int i=0;i<iter;i++){
gettimeofday(&start,NULL);
cudaMemcpy(deviceelem, elem ,256*sizeof(int), cudaMemcpyHostToDevice);
jpeg_fdct_16x16_row<<<blocknumber,threadnumber>>>(deviceelem,devicedataptr,workspace);
jpeg_fdct_16x16_col<<<blocknumber,threadnumber>>>(devicedataptr,workspace);
cudaMemcpy(dataptr, devicedataptr, 256*sizeof(int), cudaMemcpyDeviceToHost);
gettimeofday(&end,NULL);
elem += 256;
dataptr += 256;
time_used += (double)(end.tv_sec-start.tv_sec);
time_used += (double)(end.tv_usec-start.tv_usec)/1000000;
}
fprintf(stderr,"time used:%lfs\n", time_used);
/*for(int ctr = 0;ctr<iter;ctr++){
if(ctr >21246&&ctr<22000){
for(int i=0;i<57;i++){
fprintf(stderr,"%d ",(int)dataptr_start[i]);
}
printf("%d\n",ctr);
}
dataptr_start+=256;
}
*/
return 0;
}
#endif /* DCT_SCALING_SUPPORTED */
#endif /* DCT_ISLOW_SUPPORTED */
|
fe522c96c030c2eb3e84d3160b3d2306e1920ccd.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
namespace{
template <typename scalar_t>
__device__ __forceinline__ void barycentric_coordinate(scalar_t *w, const scalar_t x, const scalar_t y, const scalar_t *face_info) {
w[0] = face_info[3 * 0 + 0] * x + face_info[3 * 0 + 1] * y + face_info[3 * 0 + 2];
w[1] = face_info[3 * 1 + 0] * x + face_info[3 * 1 + 1] * y + face_info[3 * 1 + 2];
w[2] = face_info[3 * 2 + 0] * x + face_info[3 * 2 + 1] * y + face_info[3 * 2 + 2];
}
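// Added note (not from the original source): face_info is assumed to hold the per-face
// coefficients written by forward_soft_rasterize_inv_cuda_kernel below, so each
// w[i] = a_i*x + b_i*y + c_i is a barycentric weight; w[0]+w[1]+w[2] == 1 for every
// pixel, and all three weights lie in [0, 1] exactly when (x, y) is inside the triangle.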
template <typename scalar_t>
__device__ __forceinline__ bool check_border(const scalar_t x, const scalar_t y, const scalar_t *face, const scalar_t threshold) {
return (x > max(max(face[0], face[3]), face[6]) + threshold ||
x < min(min(face[0], face[3]), face[6]) - threshold ||
y > max(max(face[1], face[4]), face[7]) + threshold ||
y < min(min(face[1], face[4]), face[7]) - threshold);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_face_frontside(const scalar_t *face) {
return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) {
return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0;
}
template <typename scalar_t>
__device__ __forceinline__ void barycentric_clip(scalar_t *w) {
for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.);
const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5);
for (int k = 0; k < 3; k++) w[k] /= w_sum;
}
template <typename scalar_t>
__device__ __forceinline__ void euclidean_p2f_distance(scalar_t &sign, scalar_t &dis_x, scalar_t &dis_y,
scalar_t *w, scalar_t *t,
const scalar_t* face, const scalar_t *face_info,
const scalar_t xp, const scalar_t yp) {
const scalar_t *face_sym = face_info + 9;
const scalar_t *face_obt = face_info + 18;
if (w[0] > 0 && w[1] > 0 && w[2] > 0 &&
w[0] < 1 && w[1] < 1 && w[2] < 1) {
        // inside the triangle, w[0] + w[1] + w[2] = 1
scalar_t dis_min = 100000000;
scalar_t dis_x_min = 0;
scalar_t dis_y_min = 0;
scalar_t a0[3];
scalar_t t0[3];
for (int k = 0; k < 3; k++) {
int v0 = k;
int v1 = (k + 1) % 3;
int v2 = (k + 2) % 3;
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t0[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t0[v1] = 1 - t0[v0];
t0[v2] = 0;
t0[0] -= w[0];
t0[1] -= w[1];
t0[2] -= w[2];
// calculate distance
dis_x = t0[0] * face[0] + t0[1] * face[3] + t0[2] * face[6];
dis_y = t0[0] * face[1] + t0[1] * face[4] + t0[2] * face[7];
scalar_t dis = dis_x * dis_x + dis_y * dis_y;
if (dis < dis_min) {
dis_min = dis;
dis_x_min = dis_x;
dis_y_min = dis_y;
t[0] = t0[0];
t[1] = t0[1];
t[2] = t0[2];
}
}
dis_x = dis_x_min;
dis_y = dis_y_min;
sign = 1;
} else {
int v0 = -1;
if (w[1] <= 0 && w[2] <= 0) {
v0 = 0;
if (face_obt[0] == 1 && (xp - face[0]) * (face[6] - face[0]) + (yp - face[1]) * (face[7] - face[1]) > 0) v0 = 2;
} else if (w[2] <= 0 && w[0] <= 0) {
v0 = 1;
if (face_obt[1] == 1 && (xp - face[3]) * (face[0] - face[3]) + (yp - face[4]) * (face[1] - face[4]) > 0) v0 = 0;
} else if (w[0] <= 0 && w[1] <= 0) {
v0 = 2;
if (face_obt[2] == 1 && (xp - face[6]) * (face[3] - face[6]) + (yp - face[7]) * (face[4] - face[7]) > 0) v0 = 1;
} else
if (w[0] <= 0) v0 = 1;
else if (w[1] <= 0) v0 = 2;
else if (w[2] <= 0) v0 = 0;
const int v1 = (v0 + 1) % 3;
const int v2 = (v0 + 2) % 3;
scalar_t a0[3];
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t[v1] = 1 - t[v0];
t[v2] = 0;
// clamp to [0, 1]
for (int k = 0; k < 3; k++) {
t[k] = min(max(t[k], 0.), 1.);
t[k] -= w[k];
}
// calculate distance
dis_x = t[0] * face[0] + t[1] * face[3] + t[2] * face[6];
dis_y = t[0] * face[1] + t[1] * face[4] + t[2] * face[7];
sign = -1;
}
}
template <typename scalar_t>
__device__ __forceinline__ void forward_barycentric_p2f_distance(scalar_t &dis, const scalar_t *w) {
dis = w[0] > w[1] ? (w[1] > w[2] ? w[2] : w[1]) : (w[0] > w[2] ? w[2] : w[0]);
dis = dis > 0 ? pow(dis, 2) : -pow(dis, 2);
}
template <typename scalar_t>
__device__ __forceinline__ void backward_barycentric_p2f_distance(scalar_t grad_v[3][3], const scalar_t *w, const scalar_t *face_info, const scalar_t xp, const scalar_t yp, const scalar_t dis, const scalar_t C) {
const int p = w[0] > w[1] ? (w[1] > w[2] ? 2 : 1) : (w[0] > w[2] ? 2 : 0);
const scalar_t *face_inv = face_info;
for (int l = 0; l < 2; l++) {
for (int k = 0; k < 3; k++) {
scalar_t grad_kl = 0;
for (int q = 0; q < 3; q++) {
grad_kl += -face_inv[3*p+l] * face_inv[3*k+q] * (q == 0 ? xp : (q == 1 ? yp : 1));
}
grad_v[k][l] = grad_kl * C;
grad_v[k][l] *= dis > 0 ? (2. * sqrt(dis)) : (2. * sqrt(-dis));
}
}
}
template <typename scalar_t>
__device__ __forceinline__ void normal_face(const scalar_t *face, scalar_t *normal) {
scalar_t nx = (face[7] - face[1]) * (face[5] - face[2]) - (face[8] - face[2]) * (face[4] - face[1]);
scalar_t ny = (face[8] - face[2]) * (face[3] - face[0]) - (face[6] - face[0]) * (face[5] - face[2]);
scalar_t nz = (face[6] - face[0]) * (face[4] - face[1]) - (face[7] - face[1]) * (face[3] - face[0]);
scalar_t norm = sqrt(nx * nx + ny * ny + nz * nz);
nx = nx / norm;
ny = ny / norm;
nz = nz / norm;
nx = (nz > 0) ? nx : -nx;
ny = (nz > 0) ? ny : -ny;
nz = (nz > 0) ? nz : -nz;
normal[0] = -nx * 0.5 + 0.5;
normal[1] = -ny * 0.5 + 0.5;
normal[2] = nz * 0.5 + 0.5;
}
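// Quick sanity example (added note): a face parallel to the image plane has unit normal
// (0, 0, 1), so the encoding above stores (-0*0.5+0.5, -0*0.5+0.5, 1*0.5+0.5) =
// (0.5, 0.5, 1.0), i.e. the light-blue colour conventionally used for a flat normal map.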
template <typename scalar_t>
__device__ __forceinline__ scalar_t forward_sample_texture(const scalar_t *texture, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t texture_k;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
texture_k = texture[(w_y * R + w_x) * 3 + k];
} else {
texture_k = texture[((R - 1 - w_y) * R + (R - 1 - w_x)) * 3 + k];
}
} else
if (texture_sample_type == 1) { // sample vertex color
texture_k = w[0] * texture[k] + w[1] * texture[3+k] + w[2] * texture[6+k];
}
return texture_k;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t backward_sample_texture(const scalar_t grad_color, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t grad_texture_k;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
if (k == w_y * R + w_x) {
grad_texture_k = grad_color;
}
} else {
if (k == (R - 1 - w_y) * R + (R - 1 - w_x)) {
grad_texture_k = grad_color;
}
}
} else
if (texture_sample_type == 1) {
grad_texture_k = w[k] * grad_color;
}
return grad_texture_k;
}
// triangle preprocessing
template <typename scalar_t>
__global__ void forward_soft_rasterize_inv_cuda_kernel(
const scalar_t* __restrict__ faces,
scalar_t* faces_info,
int batch_size,
int num_faces,
int image_size) {
/* batch number, face, number, image size, face[v012][RGB] */
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
// const int is = image_size;
const scalar_t* face = &faces[i * 9];
scalar_t* face_inv = &faces_info[i * 27];
scalar_t* face_sym = &faces_info[i * 27+9];
scalar_t* face_obt = &faces_info[i * 27+18];
/* return if backside */
// if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]))
// return;
/* p[num][xy]: x, y is (-1, 1). */
// x, y in p, for z is depth
scalar_t p[3][2];
for (int num = 0; num < 3; num++) {
for (int dim = 0; dim < 2; dim++) {
p[num][dim] = face[3 * num + dim]; // no normalize
}
}
/* compute face_inv */
scalar_t face_inv_star[9] = {
p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1],
p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1],
p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]};
scalar_t face_inv_determinant = (
p[2][0] * (p[0][1] - p[1][1]) +
p[0][0] * (p[1][1] - p[2][1]) +
p[1][0] * (p[2][1] - p[0][1]));
face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10);
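    /* Added note: the clamp above keeps the division below bounded for degenerate
     * (near zero-area) triangles, e.g. a raw determinant of 1e-12 is treated as 1e-10,
     * so face_inv gets large but finite entries instead of blowing up. */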
/* set to global memory */
for (int k = 0; k < 9; k++) {
face_inv[k] = face_inv_star[k] / face_inv_determinant;
}
/* F * F.T */
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 3; k++) {
face_sym[j * 3 + k] = face[j * 3 + 0] * face[k * 3 + 0] +
face[j * 3 + 1] * face[k * 3 + 1] +
1;
}
}
/* check if one arc is obt arc */
for (int k = 0; k < 3; k++) {
const int k0 = k;
const int k1 = (k + 1) % 3;
const int k2 = (k + 2) % 3;
if ((p[k1][0] - p[k0][0]) * (p[k2][0] - p[k0][0]) + (p[k1][1] - p[k0][1]) * (p[k2][1] - p[k0][1]) < 0) {
face_obt[k0] = 1;
break;
}
}
}
template <typename scalar_t>
__global__ void forward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ faces_info,
scalar_t* aggrs_info,
scalar_t* soft_colors,
scalar_t* depth_maps,
scalar_t* normal_maps,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is); // image index
const int pn = i % (is * is); // pixel index
const int yi = is - 1 - (pn / is); // y on image
const int xi = pn % is; // x on image
const scalar_t yp = (2. * yi + 1. - is) / is; // normalize to (-1, 1)
const scalar_t xp = (2. * xi + 1. - is) / is; // normalize to (-1, 1)
const scalar_t *face = &faces[bn * nf * 9] - 9; // find the one right before face, add back after
const scalar_t *texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t *face_info = &faces_info[bn * nf * 27] - 27; // find the one right before face_info, add back after
const scalar_t threshold = dist_eps * sigma_val;
// Initialize pixel color
scalar_t soft_color[4] = {1., 1., 1., 0.};
if (func_id_alpha == 2) soft_color[3] = 1.;
scalar_t softmax_sum = exp(eps / gamma_val);
scalar_t softmax_max = eps;
scalar_t normal_vector_min[3] = {0., 0., 0.};
for (int k = 0; k < 3; k++) {
if (func_id_rgb == 0) { // hard assign, set to background
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn];
} else
if (func_id_rgb == 1) {
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn] * softmax_sum; // initialize background color
}
}
scalar_t depth_min = 10000000;
scalar_t depth_min_map = 10000000;
int face_index_min = -1;
int face_index_min_map = -1;
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue; // triangle too far away from pixel
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w_clip[3];
scalar_t cur_norm_min[3];
scalar_t sign;
scalar_t soft_fragment;
// compute barycentric coordinate w
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ignore triangle outside of the pixel
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
/////////////////////////////////////////////////////
    // aggregate for alpha channel
if (func_id_alpha == 0) { // hard assign
if (soft_fragment > 0.5) soft_color[3] = 1.;
} else
if (func_id_alpha == 1) { // Sum
soft_color[3] += soft_fragment;
} else
if (func_id_alpha == 2) { // Logical-Or
soft_color[3] *= 1. - soft_fragment;
}
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w_clip[k] = w[k];
barycentric_clip(w_clip);
const scalar_t zp = 1. / (w_clip[0] / face[2] + w_clip[1] / face[5] + w_clip[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
/////////////////////////////////////////////////////
// aggregate for rgb channels
if (zp < depth_min_map) {
depth_min_map = zp;
face_index_min_map = fn;
normal_face(face, cur_norm_min);
for (int k = 0; k < 3; k++)
{
normal_vector_min[k] = cur_norm_min[k];
}
}
if (func_id_rgb == 0) { // Hard assign
if (zp < depth_min && check_pixel_inside(w) && (double_side || check_face_frontside(face))) {
depth_min = zp;
face_index_min = fn;
for (int k = 0; k < 3; k++) {
soft_color[k] = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
}
}
} else
if (func_id_rgb == 1) { // D * Softmax (Z)
if (check_face_frontside(face) || double_side) {
const scalar_t zp_norm = (far - zp) / (far - near);
scalar_t exp_delta_zp = 1.;
if (zp_norm > softmax_max) {
exp_delta_zp = exp((softmax_max - zp_norm) / gamma_val);
softmax_max = zp_norm;
}
const scalar_t exp_z = exp((zp_norm - softmax_max) / gamma_val);
softmax_sum = exp_delta_zp * softmax_sum + exp_z * soft_fragment;
for (int k = 0; k < 3; k++) {
const scalar_t color_k = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
soft_color[k] = exp_delta_zp * soft_color[k] + exp_z * soft_fragment * color_k;// * soft_fragment;
}
}
}
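    /* Added note with illustrative numbers for the running softmax above: with
     * gamma_val = 1, a previous maximum of 0.3 and a new zp_norm of 0.5, the old
     * accumulators are first rescaled by exp((0.3 - 0.5) / 1) ~= 0.82 and only then is
     * exp(0) * soft_fragment added, so every exponent stays <= 0 and the sum cannot
     * overflow no matter how many faces contribute. */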
}
//////////////////////////////////////////////
if (0 <= face_index_min_map) {
depth_maps[(bn * 1 + 0) * (is * is) + pn] = depth_min_map;
for (int k = 0; k < 3; k++)
{
normal_maps[(bn * 3 + k) * (is * is) + pn] = normal_vector_min[k];
}
}
//////////////////////////////////////////////
// finalize aggregation
if (func_id_alpha == 0) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3];
} else
if (func_id_alpha == 1) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3] / nf;
} else
if (func_id_alpha == 2) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = 1. - soft_color[3];
}
if (func_id_rgb == 0) {
if (face_index_min != -1)
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k];
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = depth_min;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = face_index_min;
} else
if (func_id_rgb == 1) {
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k] / softmax_sum;
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = softmax_sum;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = softmax_max;
}
}
template <typename scalar_t>
__global__ void backward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ soft_colors,
const scalar_t* __restrict__ faces_info,
const scalar_t* __restrict__ aggrs_info, // 0: sum, 1: max z*D
scalar_t* grad_faces,
scalar_t* grad_textures,
scalar_t* grad_soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1 - is) / is;
const scalar_t xp = (2. * xi + 1 - is) / is;
const scalar_t* face = &faces[bn * nf * 9] - 9;
const scalar_t* texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t* face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
const scalar_t softmax_sum = aggrs_info[(bn * 2 + 0) * (is * is) + pn];
const scalar_t softmax_max = aggrs_info[(bn * 2 + 1) * (is * is) + pn];
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue;
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w0[3];
scalar_t sign;
scalar_t soft_fragment;
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ???
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
for (int k = 0; k < 3; k++) t[k] = w[k];
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle too far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9];
scalar_t* grad_texture = &grad_textures[(bn * nf + fn) * texture_size * 3];
scalar_t grad_v[3][3] = {0};
scalar_t C_grad_xy = 0;
/////////////////////////////////////////////////////
    // aggregate for alpha channel
scalar_t C_grad_xy_alpha = grad_soft_colors[(bn * 4 + 3) * (is * is) + pn];
if (func_id_alpha == 0) { // hard assign
// hard assign alpha channels does not have gradient
} else
if (func_id_alpha == 1) { // Sum
C_grad_xy_alpha /= nf;
} else
if (func_id_alpha == 2) { // Logical-Or
C_grad_xy_alpha *= (1 - soft_colors[(bn * 4 + 3) * (is * is) + pn]) / max(1 - soft_fragment, 1e-6);
}
C_grad_xy += C_grad_xy_alpha;
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w0[k] = w[k];
barycentric_clip(w);
const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign, no gradient to xyz
if (fn == softmax_max) {
for (int k = 0; k < 3; k++) {
for (int j = 0; j < texture_size; j++) {
atomicAdd(&grad_texture[3 * j + k], backward_sample_texture(grad_soft_colors[(bn * 4 + k) * (is * is) + pn], w, texture_res, j, texture_sample_type));
}
}
}
} else
if (func_id_rgb == 1 && (check_face_frontside(face) || double_side)) { // Softmax (Z * D)
scalar_t C_grad_xyz_rgb = 0.;
const scalar_t zp_norm = (far - zp) / (far - near);
const scalar_t zp_softmax = soft_fragment * exp((zp_norm - softmax_max) / gamma_val) / softmax_sum;
for (int k = 0; k < 3; k++) {
const scalar_t grad_soft_color_k = grad_soft_colors[(bn * 4 + k) * (is * is) + pn];
for (int j = 0; j < texture_size; j++) {
const scalar_t grad_t = backward_sample_texture(grad_soft_color_k, w, texture_res, j, texture_sample_type);
atomicAdd(&grad_texture[3 * j + k], zp_softmax * grad_t);
}
const scalar_t color_k = forward_sample_texture(texture, w, texture_res, k, texture_sample_type);
C_grad_xyz_rgb += grad_soft_color_k * (color_k - soft_colors[(bn * 4 + k) * (is * is) + pn]);
}
C_grad_xyz_rgb *= zp_softmax;
C_grad_xy += C_grad_xyz_rgb / soft_fragment;
const scalar_t C_grad_z_rgb = C_grad_xyz_rgb / gamma_val / (near - far) * zp * zp;
grad_v[0][2] = C_grad_z_rgb * w[0] / face[2] / face[2];
grad_v[1][2] = C_grad_z_rgb * w[1] / face[5] / face[5];
grad_v[2][2] = C_grad_z_rgb * w[2] / face[8] / face[8];
}
/////////////////////////////////////////////////////
C_grad_xy *= soft_fragment * (1 - soft_fragment) / sigma_val; // sigmoid gradient
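    // Added derivative check: soft_fragment = sigmoid(+-dis / sigma_val) and
    // d(sigmoid(x))/dx = sigmoid(x) * (1 - sigmoid(x)), so the factor above is the
    // chain-rule term d(soft_fragment)/d(dis); the sign and the derivative of the
    // distance itself are applied per distance function in the branches below.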
// compute probability map gradient based on distance functions
if (func_id_dist == 1) { // barycentric distance
backward_barycentric_p2f_distance(grad_v, t, face_info, xp, yp, dis, C_grad_xy);
} else
if (func_id_dist == 2) { // euclidean distance
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 2; l++) {
grad_v[k][l] = 2 * sign * C_grad_xy * (t[k] + w0[k]) * (l == 0 ? dis_x : dis_y);
}
}
}
atomicAdd(&grad_face[0], grad_v[0][0]);
atomicAdd(&grad_face[1], grad_v[0][1]);
atomicAdd(&grad_face[3], grad_v[1][0]);
atomicAdd(&grad_face[4], grad_v[1][1]);
atomicAdd(&grad_face[6], grad_v[2][0]);
atomicAdd(&grad_face[7], grad_v[2][1]);
atomicAdd(&grad_face[2], grad_v[0][2]);
atomicAdd(&grad_face[5], grad_v[1][2]);
atomicAdd(&grad_face[8], grad_v[2][2]);
}
}
}
std::vector<at::Tensor> forward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor soft_colors,
at::Tensor depth_maps,
at::Tensor normal_maps,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_soft_rasterize_inv_cuda", ([&] {
hipLaunchKernelGGL(( forward_soft_rasterize_inv_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0,
faces.data<scalar_t>(),
faces_info.data<scalar_t>(),
batch_size,
num_faces,
image_size);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in forward_transform_inv_triangle: %s\n", hipGetErrorString(err));
const dim3 blocks_2 ((batch_size * image_size * image_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_eff_soft_rasterize_cuda", ([&] {
hipLaunchKernelGGL(( forward_soft_rasterize_cuda_kernel<scalar_t>), dim3(blocks_2), dim3(threads), 0, 0,
faces.data<scalar_t>(),
textures.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
soft_colors.data<scalar_t>(),
depth_maps.data<scalar_t>(),
normal_maps.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
err = hipGetLastError();
if (err != hipSuccess)
printf("Error in forward_soft_rasterize: %s\n", hipGetErrorString(err));
return {faces_info, aggrs_info, soft_colors, depth_maps, normal_maps};
}
std::vector<at::Tensor> backward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor soft_colors,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor grad_faces,
at::Tensor grad_textures,
at::Tensor grad_soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_soft_rasterize_cuda", ([&] {
hipLaunchKernelGGL(( backward_soft_rasterize_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
faces.data<scalar_t>(),
textures.data<scalar_t>(),
soft_colors.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
grad_faces.data<scalar_t>(),
grad_textures.data<scalar_t>(),
grad_soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error in backward_soft_rasterize: %s\n", hipGetErrorString(err));
return {grad_faces, grad_textures};
}
| fe522c96c030c2eb3e84d3160b3d2306e1920ccd.cu | #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
        // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
namespace{
template <typename scalar_t>
__device__ __forceinline__ void barycentric_coordinate(scalar_t *w, const scalar_t x, const scalar_t y, const scalar_t *face_info) {
w[0] = face_info[3 * 0 + 0] * x + face_info[3 * 0 + 1] * y + face_info[3 * 0 + 2];
w[1] = face_info[3 * 1 + 0] * x + face_info[3 * 1 + 1] * y + face_info[3 * 1 + 2];
w[2] = face_info[3 * 2 + 0] * x + face_info[3 * 2 + 1] * y + face_info[3 * 2 + 2];
}
template <typename scalar_t>
__device__ __forceinline__ bool check_border(const scalar_t x, const scalar_t y, const scalar_t *face, const scalar_t threshold) {
return (x > max(max(face[0], face[3]), face[6]) + threshold ||
x < min(min(face[0], face[3]), face[6]) - threshold ||
y > max(max(face[1], face[4]), face[7]) + threshold ||
y < min(min(face[1], face[4]), face[7]) - threshold);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_face_frontside(const scalar_t *face) {
return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]);
}
template <typename scalar_t>
__device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) {
return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0;
}
template <typename scalar_t>
__device__ __forceinline__ void barycentric_clip(scalar_t *w) {
for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.);
const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5);
for (int k = 0; k < 3; k++) w[k] /= w_sum;
}
template <typename scalar_t>
__device__ __forceinline__ void euclidean_p2f_distance(scalar_t &sign, scalar_t &dis_x, scalar_t &dis_y,
scalar_t *w, scalar_t *t,
const scalar_t* face, const scalar_t *face_info,
const scalar_t xp, const scalar_t yp) {
const scalar_t *face_sym = face_info + 9;
const scalar_t *face_obt = face_info + 18;
if (w[0] > 0 && w[1] > 0 && w[2] > 0 &&
w[0] < 1 && w[1] < 1 && w[2] < 1) {
        // inside the triangle, w[0] + w[1] + w[2] = 1
scalar_t dis_min = 100000000;
scalar_t dis_x_min = 0;
scalar_t dis_y_min = 0;
scalar_t a0[3];
scalar_t t0[3];
for (int k = 0; k < 3; k++) {
int v0 = k;
int v1 = (k + 1) % 3;
int v2 = (k + 2) % 3;
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t0[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t0[v1] = 1 - t0[v0];
t0[v2] = 0;
t0[0] -= w[0];
t0[1] -= w[1];
t0[2] -= w[2];
// calculate distance
dis_x = t0[0] * face[0] + t0[1] * face[3] + t0[2] * face[6];
dis_y = t0[0] * face[1] + t0[1] * face[4] + t0[2] * face[7];
scalar_t dis = dis_x * dis_x + dis_y * dis_y;
if (dis < dis_min) {
dis_min = dis;
dis_x_min = dis_x;
dis_y_min = dis_y;
t[0] = t0[0];
t[1] = t0[1];
t[2] = t0[2];
}
}
dis_x = dis_x_min;
dis_y = dis_y_min;
sign = 1;
} else {
int v0 = -1;
if (w[1] <= 0 && w[2] <= 0) {
v0 = 0;
if (face_obt[0] == 1 && (xp - face[0]) * (face[6] - face[0]) + (yp - face[1]) * (face[7] - face[1]) > 0) v0 = 2;
} else if (w[2] <= 0 && w[0] <= 0) {
v0 = 1;
if (face_obt[1] == 1 && (xp - face[3]) * (face[0] - face[3]) + (yp - face[4]) * (face[1] - face[4]) > 0) v0 = 0;
} else if (w[0] <= 0 && w[1] <= 0) {
v0 = 2;
if (face_obt[2] == 1 && (xp - face[6]) * (face[3] - face[6]) + (yp - face[7]) * (face[4] - face[7]) > 0) v0 = 1;
} else
if (w[0] <= 0) v0 = 1;
else if (w[1] <= 0) v0 = 2;
else if (w[2] <= 0) v0 = 0;
const int v1 = (v0 + 1) % 3;
const int v2 = (v0 + 2) % 3;
scalar_t a0[3];
a0[0] = face_sym[3 * v0 + 0] - face_sym[3 * v1 + 0];
a0[1] = face_sym[3 * v0 + 1] - face_sym[3 * v1 + 1];
a0[2] = face_sym[3 * v0 + 2] - face_sym[3 * v1 + 2];
t[v0] = (w[0] * a0[0] + w[1] * a0[1] + w[2] * a0[2] - a0[v1]) / (a0[v0] - a0[v1]);
t[v1] = 1 - t[v0];
t[v2] = 0;
// clamp to [0, 1]
for (int k = 0; k < 3; k++) {
t[k] = min(max(t[k], 0.), 1.);
t[k] -= w[k];
}
// calculate distance
dis_x = t[0] * face[0] + t[1] * face[3] + t[2] * face[6];
dis_y = t[0] * face[1] + t[1] * face[4] + t[2] * face[7];
sign = -1;
}
}
template <typename scalar_t>
__device__ __forceinline__ void forward_barycentric_p2f_distance(scalar_t &dis, const scalar_t *w) {
dis = w[0] > w[1] ? (w[1] > w[2] ? w[2] : w[1]) : (w[0] > w[2] ? w[2] : w[0]);
dis = dis > 0 ? pow(dis, 2) : -pow(dis, 2);
}
template <typename scalar_t>
__device__ __forceinline__ void backward_barycentric_p2f_distance(scalar_t grad_v[3][3], const scalar_t *w, const scalar_t *face_info, const scalar_t xp, const scalar_t yp, const scalar_t dis, const scalar_t C) {
const int p = w[0] > w[1] ? (w[1] > w[2] ? 2 : 1) : (w[0] > w[2] ? 2 : 0);
const scalar_t *face_inv = face_info;
for (int l = 0; l < 2; l++) {
for (int k = 0; k < 3; k++) {
scalar_t grad_kl = 0;
for (int q = 0; q < 3; q++) {
grad_kl += -face_inv[3*p+l] * face_inv[3*k+q] * (q == 0 ? xp : (q == 1 ? yp : 1));
}
grad_v[k][l] = grad_kl * C;
grad_v[k][l] *= dis > 0 ? (2. * sqrt(dis)) : (2. * sqrt(-dis));
}
}
}
template <typename scalar_t>
__device__ __forceinline__ void normal_face(const scalar_t *face, scalar_t *normal) {
scalar_t nx = (face[7] - face[1]) * (face[5] - face[2]) - (face[8] - face[2]) * (face[4] - face[1]);
scalar_t ny = (face[8] - face[2]) * (face[3] - face[0]) - (face[6] - face[0]) * (face[5] - face[2]);
scalar_t nz = (face[6] - face[0]) * (face[4] - face[1]) - (face[7] - face[1]) * (face[3] - face[0]);
scalar_t norm = sqrt(nx * nx + ny * ny + nz * nz);
nx = nx / norm;
ny = ny / norm;
nz = nz / norm;
nx = (nz > 0) ? nx : -nx;
ny = (nz > 0) ? ny : -ny;
nz = (nz > 0) ? nz : -nz;
normal[0] = -nx * 0.5 + 0.5;
normal[1] = -ny * 0.5 + 0.5;
normal[2] = nz * 0.5 + 0.5;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t forward_sample_texture(const scalar_t *texture, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t texture_k;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
texture_k = texture[(w_y * R + w_x) * 3 + k];
} else {
texture_k = texture[((R - 1 - w_y) * R + (R - 1 - w_x)) * 3 + k];
}
} else
if (texture_sample_type == 1) { // sample vertex color
texture_k = w[0] * texture[k] + w[1] * texture[3+k] + w[2] * texture[6+k];
}
return texture_k;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t backward_sample_texture(const scalar_t grad_color, const scalar_t *w, const int R, const int k, const int texture_sample_type) {
scalar_t grad_texture_k;
if (texture_sample_type == 0) { // sample surface color with resolution as R
const int w_x = w[0] * R;
const int w_y = w[1] * R;
if ((w[0] + w[1]) * R - w_x - w_y <= 1) {
if (k == w_y * R + w_x) {
grad_texture_k = grad_color;
}
} else {
if (k == (R - 1 - w_y) * R + (R - 1 - w_x)) {
grad_texture_k = grad_color;
}
}
} else
if (texture_sample_type == 1) {
grad_texture_k = w[k] * grad_color;
}
return grad_texture_k;
}
// triangle preprocessing
template <typename scalar_t>
__global__ void forward_soft_rasterize_inv_cuda_kernel(
const scalar_t* __restrict__ faces,
scalar_t* faces_info,
int batch_size,
int num_faces,
int image_size) {
/* batch number, face, number, image size, face[v012][RGB] */
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * num_faces) {
return;
}
// const int is = image_size;
const scalar_t* face = &faces[i * 9];
scalar_t* face_inv = &faces_info[i * 27];
scalar_t* face_sym = &faces_info[i * 27+9];
scalar_t* face_obt = &faces_info[i * 27+18];
/* return if backside */
// if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]))
// return;
/* p[num][xy]: x, y is (-1, 1). */
// x, y in p, for z is depth
scalar_t p[3][2];
for (int num = 0; num < 3; num++) {
for (int dim = 0; dim < 2; dim++) {
p[num][dim] = face[3 * num + dim]; // no normalize
}
}
/* compute face_inv */
scalar_t face_inv_star[9] = {
p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1],
p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1],
p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]};
scalar_t face_inv_determinant = (
p[2][0] * (p[0][1] - p[1][1]) +
p[0][0] * (p[1][1] - p[2][1]) +
p[1][0] * (p[2][1] - p[0][1]));
face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10);
/* set to global memory */
for (int k = 0; k < 9; k++) {
face_inv[k] = face_inv_star[k] / face_inv_determinant;
}
/* F * F.T */
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 3; k++) {
face_sym[j * 3 + k] = face[j * 3 + 0] * face[k * 3 + 0] +
face[j * 3 + 1] * face[k * 3 + 1] +
1;
}
}
/* check if one arc is obt arc */
for (int k = 0; k < 3; k++) {
const int k0 = k;
const int k1 = (k + 1) % 3;
const int k2 = (k + 2) % 3;
if ((p[k1][0] - p[k0][0]) * (p[k2][0] - p[k0][0]) + (p[k1][1] - p[k0][1]) * (p[k2][1] - p[k0][1]) < 0) {
face_obt[k0] = 1;
break;
}
}
}
template <typename scalar_t>
__global__ void forward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ faces_info,
scalar_t* aggrs_info,
scalar_t* soft_colors,
scalar_t* depth_maps,
scalar_t* normal_maps,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is); // image index
const int pn = i % (is * is); // pixel index
const int yi = is - 1 - (pn / is); // y on image
const int xi = pn % is; // x on image
const scalar_t yp = (2. * yi + 1. - is) / is; // normalize to (-1, 1)
const scalar_t xp = (2. * xi + 1. - is) / is; // normalize to (-1, 1)
const scalar_t *face = &faces[bn * nf * 9] - 9; // find the one right before face, add back after
const scalar_t *texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t *face_info = &faces_info[bn * nf * 27] - 27; // find the one right before face_info, add back after
const scalar_t threshold = dist_eps * sigma_val;
// Initialize pixel color
scalar_t soft_color[4] = {1., 1., 1., 0.};
if (func_id_alpha == 2) soft_color[3] = 1.;
scalar_t softmax_sum = exp(eps / gamma_val);
scalar_t softmax_max = eps;
scalar_t normal_vector_min[3] = {0., 0., 0.};
for (int k = 0; k < 3; k++) {
if (func_id_rgb == 0) { // hard assign, set to background
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn];
} else
if (func_id_rgb == 1) {
soft_color[k] = soft_colors[(bn * 4 + k) * (is * is) + pn] * softmax_sum; // initialize background color
}
}
scalar_t depth_min = 10000000;
scalar_t depth_min_map = 10000000;
int face_index_min = -1;
int face_index_min_map = -1;
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue; // triangle too far away from pixel
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w_clip[3];
scalar_t cur_norm_min[3];
scalar_t sign;
scalar_t soft_fragment;
// compute barycentric coordinate w
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ignore triangle outside of the pixel
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
/////////////////////////////////////////////////////
    // aggregate for alpha channel
if (func_id_alpha == 0) { // hard assign
if (soft_fragment > 0.5) soft_color[3] = 1.;
} else
if (func_id_alpha == 1) { // Sum
soft_color[3] += soft_fragment;
} else
if (func_id_alpha == 2) { // Logical-Or
soft_color[3] *= 1. - soft_fragment;
}
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w_clip[k] = w[k];
barycentric_clip(w_clip);
const scalar_t zp = 1. / (w_clip[0] / face[2] + w_clip[1] / face[5] + w_clip[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
/////////////////////////////////////////////////////
// aggregate for rgb channels
if (zp < depth_min_map) {
depth_min_map = zp;
face_index_min_map = fn;
normal_face(face, cur_norm_min);
for (int k = 0; k < 3; k++)
{
normal_vector_min[k] = cur_norm_min[k];
}
}
if (func_id_rgb == 0) { // Hard assign
if (zp < depth_min && check_pixel_inside(w) && (double_side || check_face_frontside(face))) {
depth_min = zp;
face_index_min = fn;
for (int k = 0; k < 3; k++) {
soft_color[k] = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
}
}
} else
if (func_id_rgb == 1) { // D * Softmax (Z)
if (check_face_frontside(face) || double_side) {
const scalar_t zp_norm = (far - zp) / (far - near);
scalar_t exp_delta_zp = 1.;
if (zp_norm > softmax_max) {
exp_delta_zp = exp((softmax_max - zp_norm) / gamma_val);
softmax_max = zp_norm;
}
const scalar_t exp_z = exp((zp_norm - softmax_max) / gamma_val);
softmax_sum = exp_delta_zp * softmax_sum + exp_z * soft_fragment;
for (int k = 0; k < 3; k++) {
const scalar_t color_k = forward_sample_texture(texture, w_clip, texture_res, k, texture_sample_type);
soft_color[k] = exp_delta_zp * soft_color[k] + exp_z * soft_fragment * color_k;// * soft_fragment;
}
}
}
}
//////////////////////////////////////////////
if (0 <= face_index_min_map) {
depth_maps[(bn * 1 + 0) * (is * is) + pn] = depth_min_map;
for (int k = 0; k < 3; k++)
{
normal_maps[(bn * 3 + k) * (is * is) + pn] = normal_vector_min[k];
}
}
//////////////////////////////////////////////
// finalize aggregation
if (func_id_alpha == 0) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3];
} else
if (func_id_alpha == 1) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = soft_color[3] / nf;
} else
if (func_id_alpha == 2) {
soft_colors[(bn * 4 + 3) * (is * is) + pn] = 1. - soft_color[3];
}
if (func_id_rgb == 0) {
if (face_index_min != -1)
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k];
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = depth_min;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = face_index_min;
} else
if (func_id_rgb == 1) {
for (int k = 0; k < 3; k++) {
soft_colors[(bn * 4 + k) * (is * is) + pn] = soft_color[k] / softmax_sum;
}
aggrs_info[(bn * 2 + 0) * (is * is) + pn] = softmax_sum;
aggrs_info[(bn * 2 + 1) * (is * is) + pn] = softmax_max;
}
}
template <typename scalar_t>
__global__ void backward_soft_rasterize_cuda_kernel(
const scalar_t* __restrict__ faces,
const scalar_t* __restrict__ textures,
const scalar_t* __restrict__ soft_colors,
const scalar_t* __restrict__ faces_info,
const scalar_t* __restrict__ aggrs_info, // 0: sum, 1: max z*D
scalar_t* grad_faces,
scalar_t* grad_textures,
scalar_t* grad_soft_colors,
int batch_size,
int num_faces,
int image_size,
int texture_size,
int texture_res,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
////////////////////////
////////////////////////
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= batch_size * image_size * image_size) {
return;
}
const int is = image_size;
const int nf = num_faces;
const int bn = i / (is * is);
const int pn = i % (is * is);
const int yi = is - 1 - (pn / is);
const int xi = pn % is;
const scalar_t yp = (2. * yi + 1 - is) / is;
const scalar_t xp = (2. * xi + 1 - is) / is;
const scalar_t* face = &faces[bn * nf * 9] - 9;
const scalar_t* texture = &textures[bn * nf * texture_size * 3] - texture_size * 3;
const scalar_t* face_info = &faces_info[bn * nf * 27] - 27;
const scalar_t threshold = dist_eps * sigma_val;
const scalar_t softmax_sum = aggrs_info[(bn * 2 + 0) * (is * is) + pn];
const scalar_t softmax_max = aggrs_info[(bn * 2 + 1) * (is * is) + pn];
for (int fn = 0; fn < nf; fn++) {
face += 9;
texture += texture_size * 3;
face_info += 27;
if (check_border(xp, yp, face, sqrt(threshold))) continue;
scalar_t dis;
scalar_t dis_x;
scalar_t dis_y;
scalar_t t[3];
scalar_t w[3];
scalar_t w0[3];
scalar_t sign;
scalar_t soft_fragment;
barycentric_coordinate(w, xp, yp, face_info);
// compute probability map based on distance functions
if (func_id_dist == 0) { // hard assign
soft_fragment = check_pixel_inside(w) ? 1. : 0.;
if (soft_fragment == 0.) continue; // ???
} else
if (func_id_dist == 1) { // barycentric distance
forward_barycentric_p2f_distance(dis, w);
for (int k = 0; k < 3; k++) t[k] = w[k];
if (-dis >= threshold) continue; // ignore triangle far away from the pixel
soft_fragment = 1. / (1. + exp(-dis / sigma_val));
} else
if (func_id_dist == 2) { // euclidean distance
euclidean_p2f_distance(sign, dis_x, dis_y, w, t, face, face_info, xp, yp);
dis = dis_x * dis_x + dis_y * dis_y;
if (sign < 0 && dis >= threshold) continue; // ignore triangle too far away from the pixel
soft_fragment = 1. / (1. + exp(-sign * dis / sigma_val));
}
scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9];
scalar_t* grad_texture = &grad_textures[(bn * nf + fn) * texture_size * 3];
scalar_t grad_v[3][3] = {0};
scalar_t C_grad_xy = 0;
/////////////////////////////////////////////////////
    // aggregate for alpha channel
scalar_t C_grad_xy_alpha = grad_soft_colors[(bn * 4 + 3) * (is * is) + pn];
if (func_id_alpha == 0) { // hard assign
// hard assign alpha channels does not have gradient
} else
if (func_id_alpha == 1) { // Sum
C_grad_xy_alpha /= nf;
} else
if (func_id_alpha == 2) { // Logical-Or
C_grad_xy_alpha *= (1 - soft_colors[(bn * 4 + 3) * (is * is) + pn]) / max(1 - soft_fragment, 1e-6);
}
C_grad_xy += C_grad_xy_alpha;
/////////////////////////////////////////////////////
for (int k = 0; k < 3; k++) w0[k] = w[k];
barycentric_clip(w);
const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]);
if (zp < near || zp > far) continue; // triangle out of screen, pass
// aggregate for rgb channels
if (func_id_rgb == 0) { // Hard assign, no gradient to xyz
if (fn == softmax_max) {
for (int k = 0; k < 3; k++) {
for (int j = 0; j < texture_size; j++) {
atomicAdd(&grad_texture[3 * j + k], backward_sample_texture(grad_soft_colors[(bn * 4 + k) * (is * is) + pn], w, texture_res, j, texture_sample_type));
}
}
}
} else
if (func_id_rgb == 1 && (check_face_frontside(face) || double_side)) { // Softmax (Z * D)
scalar_t C_grad_xyz_rgb = 0.;
const scalar_t zp_norm = (far - zp) / (far - near);
const scalar_t zp_softmax = soft_fragment * exp((zp_norm - softmax_max) / gamma_val) / softmax_sum;
for (int k = 0; k < 3; k++) {
const scalar_t grad_soft_color_k = grad_soft_colors[(bn * 4 + k) * (is * is) + pn];
for (int j = 0; j < texture_size; j++) {
const scalar_t grad_t = backward_sample_texture(grad_soft_color_k, w, texture_res, j, texture_sample_type);
atomicAdd(&grad_texture[3 * j + k], zp_softmax * grad_t);
}
const scalar_t color_k = forward_sample_texture(texture, w, texture_res, k, texture_sample_type);
C_grad_xyz_rgb += grad_soft_color_k * (color_k - soft_colors[(bn * 4 + k) * (is * is) + pn]);
}
C_grad_xyz_rgb *= zp_softmax;
C_grad_xy += C_grad_xyz_rgb / soft_fragment;
const scalar_t C_grad_z_rgb = C_grad_xyz_rgb / gamma_val / (near - far) * zp * zp;
grad_v[0][2] = C_grad_z_rgb * w[0] / face[2] / face[2];
grad_v[1][2] = C_grad_z_rgb * w[1] / face[5] / face[5];
grad_v[2][2] = C_grad_z_rgb * w[2] / face[8] / face[8];
}
/////////////////////////////////////////////////////
C_grad_xy *= soft_fragment * (1 - soft_fragment) / sigma_val; // sigmoid gradient
// compute probability map gradient based on distance functions
if (func_id_dist == 1) { // barycentric distance
backward_barycentric_p2f_distance(grad_v, t, face_info, xp, yp, dis, C_grad_xy);
} else
if (func_id_dist == 2) { // euclidean distance
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 2; l++) {
grad_v[k][l] = 2 * sign * C_grad_xy * (t[k] + w0[k]) * (l == 0 ? dis_x : dis_y);
}
}
}
atomicAdd(&grad_face[0], grad_v[0][0]);
atomicAdd(&grad_face[1], grad_v[0][1]);
atomicAdd(&grad_face[3], grad_v[1][0]);
atomicAdd(&grad_face[4], grad_v[1][1]);
atomicAdd(&grad_face[6], grad_v[2][0]);
atomicAdd(&grad_face[7], grad_v[2][1]);
atomicAdd(&grad_face[2], grad_v[0][2]);
atomicAdd(&grad_face[5], grad_v[1][2]);
atomicAdd(&grad_face[8], grad_v[2][2]);
}
}
}
std::vector<at::Tensor> forward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor soft_colors,
at::Tensor depth_maps,
at::Tensor normal_maps,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_soft_rasterize_inv_cuda", ([&] {
forward_soft_rasterize_inv_cuda_kernel<scalar_t><<<blocks_1, threads>>>(
faces.data<scalar_t>(),
faces_info.data<scalar_t>(),
batch_size,
num_faces,
image_size);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_transform_inv_triangle: %s\n", cudaGetErrorString(err));
const dim3 blocks_2 ((batch_size * image_size * image_size - 1) / threads +1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_eff_soft_rasterize_cuda", ([&] {
forward_soft_rasterize_cuda_kernel<scalar_t><<<blocks_2, threads>>>(
faces.data<scalar_t>(),
textures.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
soft_colors.data<scalar_t>(),
depth_maps.data<scalar_t>(),
normal_maps.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in forward_soft_rasterize: %s\n", cudaGetErrorString(err));
return {faces_info, aggrs_info, soft_colors, depth_maps, normal_maps};
}
std::vector<at::Tensor> backward_soft_rasterize_cuda(
at::Tensor faces,
at::Tensor textures,
at::Tensor soft_colors,
at::Tensor faces_info,
at::Tensor aggrs_info,
at::Tensor grad_faces,
at::Tensor grad_textures,
at::Tensor grad_soft_colors,
int image_size,
float near,
float far,
float eps,
float sigma_val,
int func_id_dist,
float dist_eps,
float gamma_val,
int func_id_rgb,
int func_id_alpha,
int texture_sample_type,
bool double_side) {
const auto batch_size = faces.size(0);
const auto num_faces = faces.size(1);
const auto texture_size = textures.size(2);
const auto texture_res = int(sqrt(texture_size));
const int threads = 512;
const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1);
AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_soft_rasterize_cuda", ([&] {
backward_soft_rasterize_cuda_kernel<scalar_t><<<blocks, threads>>>(
faces.data<scalar_t>(),
textures.data<scalar_t>(),
soft_colors.data<scalar_t>(),
faces_info.data<scalar_t>(),
aggrs_info.data<scalar_t>(),
grad_faces.data<scalar_t>(),
grad_textures.data<scalar_t>(),
grad_soft_colors.data<scalar_t>(),
batch_size,
num_faces,
image_size,
texture_size,
texture_res,
near,
far,
eps,
sigma_val,
func_id_dist,
dist_eps,
gamma_val,
func_id_rgb,
func_id_alpha,
texture_sample_type,
double_side);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in backward_soft_rasterize: %s\n", cudaGetErrorString(err));
return {grad_faces, grad_textures};
}
|
8c9fe9a625f3cbbcc70cd7a14f20302dfdc3c82e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ParticleFilter.cu
* @details This file describes the functions belonging to ParticleFilter class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "ParticleFilter.h"
#include "ParticleFilter_kernel.cu"
ParticleFilter::ParticleFilter(int x, int y, int fr, int n)
{
IszX = x;
IszY = y;
Nfr = fr;
Nparticles = n;
max_size = IszX*IszY*Nfr;
//original particle centroid
xe = roundDouble(IszY/2.0);
ye = roundDouble(IszX/2.0);
radius = 5;
diameter = radius*2 - 1;
countOnes = 0;
}
ParticleFilter::~ParticleFilter()
{
//Free host memory
if(seed != NULL) delete [] seed;
if(I != NULL) delete [] I;
if(disk != NULL) delete [] disk;
if(objxy != NULL) delete [] objxy;
if(weights != NULL) delete [] weights;
if(likelihood != NULL) delete [] likelihood;
if(ind != NULL) delete [] ind;
if(arrayX != NULL) hipHostFree(arrayX);
if(arrayY != NULL) hipHostFree(arrayY);
if(xj != NULL) hipHostFree(xj);
if(yj != NULL) hipHostFree(yj);
if(CDF != NULL) hipHostFree(CDF);
if(u != NULL) hipHostFree(u);
//Free device memory
if(arrayX_GPU != NULL) hipFree(arrayX_GPU);
if(arrayY_GPU != NULL) hipFree(arrayY_GPU);
if(xj_GPU != NULL) hipFree(xj_GPU);
if(yj_GPU != NULL) hipFree(yj_GPU);
if(CDF_GPU != NULL) hipFree(CDF_GPU);
if(u_GPU != NULL) hipFree(u_GPU);
}
void ParticleFilter::allocHostMemory(void)
{
seed = new int [Nparticles];
I = new int[IszX*IszY*Nfr];
disk = new int[diameter*diameter]();  // value-initialized so entries outside the disk start at 0
weights = new double[Nparticles];
likelihood = new double[Nparticles];
hipHostMalloc((void **)&arrayX, sizeof(double)*Nparticles);
hipHostMalloc((void **)&arrayY, sizeof(double)*Nparticles);
hipHostMalloc((void **)&xj, sizeof(double)*Nparticles);
hipHostMalloc((void **)&yj, sizeof(double)*Nparticles);
hipHostMalloc((void **)&CDF, sizeof(double)*Nparticles);
hipHostMalloc((void **)&u, sizeof(double)*Nparticles);
}
void ParticleFilter::freeHostMemory(void)
{
//Free host memory
if(seed != NULL) delete [] seed;
if(I != NULL) delete [] I;
if(disk != NULL) delete [] disk;
if(objxy != NULL) delete [] objxy;
if(weights != NULL) delete [] weights;
if(likelihood != NULL) delete [] likelihood;
if(ind != NULL) delete [] ind;
if(arrayX != NULL) hipHostFree(arrayX);
if(arrayY != NULL) hipHostFree(arrayY);
if(xj != NULL) hipHostFree(xj);
if(yj != NULL) hipHostFree(yj);
if(CDF != NULL) hipHostFree(CDF);
if(u != NULL) hipHostFree(u);
}
void ParticleFilter::allocDeviceMemory(void)
{
//CUDA memory allocation
hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles);
hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles);
hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles);
hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles);
hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles);
hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles);
}
void ParticleFilter::freeDeviceMemory(void)
{
//Free device memory
if(arrayX_GPU != NULL) hipFree(arrayX_GPU);
if(arrayY_GPU != NULL) hipFree(arrayY_GPU);
if(xj_GPU != NULL) hipFree(xj_GPU);
if(yj_GPU != NULL) hipFree(yj_GPU);
if(CDF_GPU != NULL) hipFree(CDF_GPU);
if(u_GPU != NULL) hipFree(u_GPU);
}
void ParticleFilter::generatingData(void)
{
for(int i = 0; i < Nparticles; i++)
seed[i] = time(NULL)*i;
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
//build the disk first, then count the number of ones it contains
strelDisk(disk, radius);
countOnes = 0;
for(int x = 0; x < diameter; x++){
for(int y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
//objxy = new double [countOnes];
objxy = new double [2000];
getneighbors(disk, countOnes, objxy, radius);
for(int x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
//ind = new int[countOnes];
ind = new int[2000];
for(int x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
for(int x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
int k = 1;
for(int x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(int y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(int x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
sumWeights = 0;
for(int x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
for(int x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
xe = 0;
ye = 0;
// estimate the object location by expected values
for(int x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2)
+ pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
CDF[0] = weights[0];
for(int x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(int x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
}
void ParticleFilter::memHostToDeviceAsync(hipStream_t stream)
{
hipMemcpyAsync(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
hipMemcpyAsync(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice, stream);
}
void ParticleFilter::memHostToDevice(void)
{
hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
}
void ParticleFilter::memDeviceToHostAsync(hipStream_t stream)
{
hipMemcpyAsync(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost, stream);
hipMemcpyAsync(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost, stream);
}
void ParticleFilter::memDeviceToHost(void)
{
hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
}
void ParticleFilter::launch_kernel_Async(hipStream_t stream)
{
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block), 0, stream , arrayX_GPU, arrayY_GPU, CDF_GPU,
u_GPU, xj_GPU, yj_GPU, Nparticles);
}
void ParticleFilter::launch_kernel(void)
{
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block), 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU,
u_GPU, xj_GPU, yj_GPU, Nparticles);
}
void ParticleFilter::checkResults(void)
{
for(int x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
}
void ParticleFilter::getBytesHTD(int *bytes_htd)
{
*bytes_htd = Nparticles * sizeof(double) * 6;
}
void ParticleFilter::getBytesDTH(int *bytes_dth)
{
*bytes_dth = Nparticles * sizeof(double) * 2;
}
void ParticleFilter::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the fractional part < .5 => return value < input value; else return value > input value
*/
double ParticleFilter::roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Fills a diameter x diameter matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void ParticleFilter::strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void ParticleFilter::videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void ParticleFilter::imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void ParticleFilter::dilate_matrix(int * matrix, int posX, int posY, int posZ,
int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void ParticleFilter::setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void ParticleFilter::addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void ParticleFilter::getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double ParticleFilter::randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double ParticleFilter::randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 50
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double ParticleFilter::calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
} | 8c9fe9a625f3cbbcc70cd7a14f20302dfdc3c82e.cu | /**
* @file ParticleFilter.cu
* @details This file describes the functions belonging to ParticleFilter class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "ParticleFilter.h"
#include "ParticleFilter_kernel.cu"
ParticleFilter::ParticleFilter(int x, int y, int fr, int n)
{
IszX = x;
IszY = y;
Nfr = fr;
Nparticles = n;
max_size = IszX*IszY*Nfr;
//original particle centroid
xe = roundDouble(IszY/2.0);
ye = roundDouble(IszX/2.0);
radius = 5;
diameter = radius*2 - 1;
countOnes = 0;
}
ParticleFilter::~ParticleFilter()
{
//Free host memory
if(seed != NULL) delete [] seed;
if(I != NULL) delete [] I;
if(disk != NULL) delete [] disk;
if(objxy != NULL) delete [] objxy;
if(weights != NULL) delete [] weights;
if(likelihood != NULL) delete [] likelihood;
if(ind != NULL) delete [] ind;
if(arrayX != NULL) cudaFreeHost(arrayX);
if(arrayY != NULL) cudaFreeHost(arrayY);
if(xj != NULL) cudaFreeHost(xj);
if(yj != NULL) cudaFreeHost(yj);
if(CDF != NULL) cudaFreeHost(CDF);
if(u != NULL) cudaFreeHost(u);
//Free device memory
if(arrayX_GPU != NULL) cudaFree(arrayX_GPU);
if(arrayY_GPU != NULL) cudaFree(arrayY_GPU);
if(xj_GPU != NULL) cudaFree(xj_GPU);
if(yj_GPU != NULL) cudaFree(yj_GPU);
if(CDF_GPU != NULL) cudaFree(CDF_GPU);
if(u_GPU != NULL) cudaFree(u_GPU);
}
void ParticleFilter::allocHostMemory(void)
{
seed = new int [Nparticles];
I = new int[IszX*IszY*Nfr];
disk = new int[diameter*diameter]();  // value-initialized so entries outside the disk start at 0
weights = new double[Nparticles];
likelihood = new double[Nparticles];
cudaMallocHost((void **)&arrayX, sizeof(double)*Nparticles);
cudaMallocHost((void **)&arrayY, sizeof(double)*Nparticles);
cudaMallocHost((void **)&xj, sizeof(double)*Nparticles);
cudaMallocHost((void **)&yj, sizeof(double)*Nparticles);
cudaMallocHost((void **)&CDF, sizeof(double)*Nparticles);
cudaMallocHost((void **)&u, sizeof(double)*Nparticles);
}
void ParticleFilter::freeHostMemory(void)
{
//Free host memory
if(seed != NULL) delete [] seed;
if(I != NULL) delete [] I;
if(disk != NULL) delete [] disk;
if(objxy != NULL) delete [] objxy;
if(weights != NULL) delete [] weights;
if(likelihood != NULL) delete [] likelihood;
if(ind != NULL) delete [] ind;
if(arrayX != NULL) cudaFreeHost(arrayX);
if(arrayY != NULL) cudaFreeHost(arrayY);
if(xj != NULL) cudaFreeHost(xj);
if(yj != NULL) cudaFreeHost(yj);
if(CDF != NULL) cudaFreeHost(CDF);
if(u != NULL) cudaFreeHost(u);
}
void ParticleFilter::allocDeviceMemory(void)
{
//CUDA memory allocation
cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles);
cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles);
cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles);
cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles);
cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles);
cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles);
}
void ParticleFilter::freeDeviceMemory(void)
{
//Free device memory
if(arrayX_GPU != NULL) cudaFree(arrayX_GPU);
if(arrayY_GPU != NULL) cudaFree(arrayY_GPU);
if(xj_GPU != NULL) cudaFree(xj_GPU);
if(yj_GPU != NULL) cudaFree(yj_GPU);
if(CDF_GPU != NULL) cudaFree(CDF_GPU);
if(u_GPU != NULL) cudaFree(u_GPU);
}
void ParticleFilter::generatingData(void)
{
for(int i = 0; i < Nparticles; i++)
seed[i] = time(NULL)*i;
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
//build the disk first, then count the number of ones it contains
strelDisk(disk, radius);
countOnes = 0;
for(int x = 0; x < diameter; x++){
for(int y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
//objxy = new double [countOnes];
objxy = new double [2000];
getneighbors(disk, countOnes, objxy, radius);
for(int x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
//ind = new int[countOnes];
ind = new int[2000];
for(int x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
for(int x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
int k = 1;
for(int x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind
for(int y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(int x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
sumWeights = 0;
for(int x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
for(int x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
xe = 0;
ye = 0;
// estimate the object location by expected values
for(int x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2)
+ pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
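// Systematic resampling setup: build the cumulative weight distribution CDF[] and N evenly
// spaced sample positions u[x] = u1 + x/N, with a single random offset u1 drawn from [0, 1/N).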
CDF[0] = weights[0];
for(int x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(int x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
}
void ParticleFilter::memHostToDeviceAsync(cudaStream_t stream)
{
cudaMemcpyAsync(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice, stream);
}
void ParticleFilter::memHostToDevice(void)
{
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
}
void ParticleFilter::memDeviceToHostAsync(cudaStream_t stream)
{
cudaMemcpyAsync(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost, stream);
cudaMemcpyAsync(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost, stream);
}
void ParticleFilter::memDeviceToHost(void)
{
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
}
void ParticleFilter::launch_kernel_Async(cudaStream_t stream)
{
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
kernel <<< num_blocks, threads_per_block, 0, stream >>> (arrayX_GPU, arrayY_GPU, CDF_GPU,
u_GPU, xj_GPU, yj_GPU, Nparticles);
}
void ParticleFilter::launch_kernel(void)
{
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
kernel <<< num_blocks, threads_per_block>>> (arrayX_GPU, arrayY_GPU, CDF_GPU,
u_GPU, xj_GPU, yj_GPU, Nparticles);
}
void ParticleFilter::checkResults(void)
{
for(int x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
}
void ParticleFilter::getBytesHTD(int *bytes_htd)
{
*bytes_htd = Nparticles * sizeof(double) * 6;
}
void ParticleFilter::getBytesDTH(int *bytes_dth)
{
*bytes_dth = Nparticles * sizeof(double) * 2;
}
void ParticleFilter::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
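// Linear latency/bandwidth cost model: time = Lo (fixed overhead) + bytes * G (per-byte cost);
// the overlapped coefficients are only used when the device has two copy engines (asyncEngineCount == 2).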
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the fractional part < .5 => return value < input value; else return value > input value
*/
double ParticleFilter::roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Fills a diameter x diameter matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void ParticleFilter::strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void ParticleFilter::videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void ParticleFilter::imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void ParticleFilter::dilate_matrix(int * matrix, int posX, int posY, int posZ,
int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void ParticleFilter::setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void ParticleFilter::addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void ParticleFilter::getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double ParticleFilter::randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
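// each randu() call advances seed[index], so u and v are two successive LCG draws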
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double ParticleFilter::randu(int * seed, int index)
{
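// A, C and M are the LCG multiplier, increment and modulus; they are assumed to be defined
// in ParticleFilter.h with the GCC-style values referenced in the comment above.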
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 50
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double ParticleFilter::calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
} |
fe61eba0b5fd514da6c691bc3bb6f79ebb84fbab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef DISABLE_GPU
#include <kernels.cuh>
#include <common.cuh>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
__global__ void kSetupCurand(hiprandState_t *state, int nelem, unsigned int seed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < nelem) hiprand_init(seed, i, 0, &state[i]);
}
#endif
| fe61eba0b5fd514da6c691bc3bb6f79ebb84fbab.cu | #ifndef DISABLE_GPU
#include <kernels.cuh>
#include <common.cuh>
#include <curand.h>
#include <curand_kernel.h>
__global__ void kSetupCurand(curandState *state, int nelem, unsigned int seed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < nelem) curand_init(seed, i, 0, &state[i]);
}
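// Example launch (sketch; variable names are illustrative):
//   curandState *d_states;
//   cudaMalloc(&d_states, nelem * sizeof(curandState));
//   kSetupCurand<<<(nelem + 255) / 256, 256>>>(d_states, nelem, seed);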
#endif
|
7d4c1f8f6cd97f34426a9bebaf7ab380299d0e37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zlobpcg_shift.cu, normal z -> s, Thu Oct 8 23:05:46 2020
*/
#include "magmasparse_internal.h"
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
float tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_slobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloat_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( float );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( float( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_slobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| 7d4c1f8f6cd97f34426a9bebaf7ab380299d0e37.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zlobpcg_shift.cu, normal z -> s, Thu Oct 8 23:05:46 2020
*/
#include "magmasparse_internal.h"
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if ( row<num_rows) {
float tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
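For example, with num_vecs = 3 and shift = 1, each row is shifted left by one vector:
x2 and x3 move into the slots previously holding x1 and x2, while the last slot keeps
its old contents (a sketch of the intended per-row effect).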
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_slobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloat_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( float );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( float( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
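// The row index is split across a 2D grid (the kernel computes row = blockIdx.y * gridDim.x + blockIdx.x),
// so row counts larger than a single grid dimension allows can still be covered.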
magma_slobpcg_shift_kernel<<< grid, block, Ms, queue->cuda_stream() >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
3bd175ef11760f2de5fbdcb00a59164250d08a21.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
//CUDA RunTime API
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 16
__global__ static void matMultCUDA(const float *A, const float* B, float *C, int m, int n, int e)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x *blockDim.x + threadIdx.x;
if (row < m && col < n)
{
float Cvalue = 0;
for (int i = 0; i < e; i++)
{
Cvalue += A[row*e + i] * B[i*n + col];
}
C[n*row + col] = Cvalue;
}
}
extern "C"
void mulWithCUDA(const float *A, const float *B, float *C, int m, int n, int e)
{
hipSetDevice(0);
int heightA = m;
int widthA = e;
int heightB = e;
int widthB = n;
int heightC = heightA;
int widthC = widthB;
float *d_A, *d_B, *d_C;
hipMalloc((void**)&d_A, sizeof(float)*heightA*widthA);
hipMalloc((void**)&d_B, sizeof(float)*heightB*widthB);
hipMalloc((void**)&d_C, sizeof(float)*heightC*widthC);
hipMemcpy(d_A, A, sizeof(float)*heightA*widthA, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, sizeof(float)*heightB*widthB, hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//int numBlock = (widthB*heightA + BLOCK_SIZE*BLOCK_SIZE -1) / BLOCK_SIZE*BLOCK_SIZE;
dim3 dimGrid(1, 1);
dimGrid.x = ceil(float(widthB) / double(dimBlock.x));
dimGrid.y = ceil(float(heightA) / double(dimBlock.y));
//dim3 dimGrid((widthB + dimBlock.x - 1) / dimBlock.x, (heightA + dimBlock.y - 1) / dimBlock.y);
//dim3 dimGrid((widthB ) / dimBlock.x, (heightA ) / dimBlock.y);
matMultCUDA << <dimGrid, dimBlock >> > (d_A, d_B, d_C, heightC, widthC, widthA);
hipMemcpy(C, d_C, sizeof(float)*heightC*widthC, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | 3bd175ef11760f2de5fbdcb00a59164250d08a21.cu | #include <math.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 16
__global__ static void matMultCUDA(const float *A, const float* B, float *C, int m, int n, int e)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x *blockDim.x + threadIdx.x;
if (row < m && col < n)
{
float Cvalue = 0;
for (int i = 0; i < e; i++)
{
Cvalue += A[row*e + i] * B[i*n + col];
}
C[n*row + col] = Cvalue;
}
}
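// Note: matMultCUDA above treats its operands as row-major: A is m x e, B is e x n, C is m x n,
// and each thread computes one C(row, col) element as a dot product of length e.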
extern "C"
void mulWithCUDA(const float *A, const float *B, float *C, int m, int n, int e)
{
cudaSetDevice(0);
int heightA = m;
int widthA = e;
int heightB = e;
int widthB = n;
int heightC = heightA;
int widthC = widthB;
float *d_A, *d_B, *d_C;
cudaMalloc((void**)&d_A, sizeof(float)*heightA*widthA);
cudaMalloc((void**)&d_B, sizeof(float)*heightB*widthB);
cudaMalloc((void**)&d_C, sizeof(float)*heightC*widthC);
cudaMemcpy(d_A, A, sizeof(float)*heightA*widthA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeof(float)*heightB*widthB, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//int numBlock = (widthB*heightA + BLOCK_SIZE*BLOCK_SIZE -1) / BLOCK_SIZE*BLOCK_SIZE;
dim3 dimGrid(1, 1);
dimGrid.x = ceil(float(widthB) / double(dimBlock.x));
dimGrid.y = ceil(float(heightA) / double(dimBlock.y));
//dim3 dimGrid((widthB + dimBlock.x - 1) / dimBlock.x, (heightA + dimBlock.y - 1) / dimBlock.y);
//dim3 dimGrid((widthB ) / dimBlock.x, (heightA ) / dimBlock.y);
matMultCUDA << <dimGrid, dimBlock >> > (d_A, d_B, d_C, heightC, widthC, widthA);
cudaMemcpy(C, d_C, sizeof(float)*heightC*widthC, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} |
021bd1ec7c844696eac7219fa5e2d8c23eff6613.hip | // !!! This is a file automatically generated by hipify!!!
#include "kmeans/kmeans.cu"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
namespace ML {
using namespace MLCommon;
template<typename T>
struct KmeansInputs {
int n_clusters;
T tol;
int n_row;
int n_col;
};
template<typename T>
class KmeansTest: public ::testing::TestWithParam<KmeansInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
int m = params.n_row;
int n = params.n_col;
int k = params.n_clusters;
// make space for outputs : pred_centroids, pred_labels
// and reference output : labels_ref
allocate(d_srcdata, n * m);
allocate(labels_fit, m);
allocate(labels_ref_fit, m);
allocate(pred_centroids, k * n);
allocate(centroids_ref, k * n);
// make testdata on host
T h_srcdata[n * m] =
{1.0,1.0,3.0,4.0, 1.0,2.0,2.0,3.0};
updateDevice(d_srcdata, h_srcdata, m*n);
// make and assign reference output
int h_labels_ref_fit[m] = {1, 1, 0, 0};
updateDevice(labels_ref_fit, h_labels_ref_fit, m);
T h_centroids_ref[k * n] = {3.5,2.5, 1.0,1.5};
updateDevice(centroids_ref, h_centroids_ref, k * n);
// The actual kmeans api calls
// fit
make_ptr_kmeans(0, verbose, seed, gpu_id, n_gpu, m, n,
ord, k, k, max_iterations,
init_from_data, params.tol, d_srcdata, nullptr, pred_centroids, labels_fit);
}
void SetUp() override {
basicTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(d_srcdata));
CUDA_CHECK(hipFree(labels_fit));
CUDA_CHECK(hipFree(pred_centroids));
CUDA_CHECK(hipFree(labels_ref_fit));
CUDA_CHECK(hipFree(centroids_ref));
}
protected:
KmeansInputs<T> params;
T *d_srcdata;
int *labels_fit, *labels_ref_fit;
T *pred_centroids, *centroids_ref;
int verbose = 0;
int seed = 1;
int gpu_id = 0;
int n_gpu = -1;
char ord = 'c'; // here c means col order, NOT C (vs F) order
int max_iterations = 300;
int init_from_data = 0;
};
const std::vector<KmeansInputs<float> > inputsf2 = {
{ 2, 0.05f, 4, 2 }};
const std::vector<KmeansInputs<double> > inputsd2 = {
{ 2, 0.05, 4, 2 }};
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Fit) {
ASSERT_TRUE(
devArrMatch(labels_ref_fit, labels_fit, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(centroids_ref, pred_centroids, params.n_clusters * params.n_col,
CompareApproxAbs<float>(params.tol)));
}
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Fit) {
ASSERT_TRUE(
devArrMatch(labels_ref_fit, labels_fit, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(centroids_ref, pred_centroids, params.n_clusters * params.n_col,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
| 021bd1ec7c844696eac7219fa5e2d8c23eff6613.cu |
#include "kmeans/kmeans.cu"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
namespace ML {
using namespace MLCommon;
template<typename T>
struct KmeansInputs {
int n_clusters;
T tol;
int n_row;
int n_col;
};
template<typename T>
class KmeansTest: public ::testing::TestWithParam<KmeansInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
int m = params.n_row;
int n = params.n_col;
int k = params.n_clusters;
// make space for outputs : pred_centroids, pred_labels
// and reference output : labels_ref
allocate(d_srcdata, n * m);
allocate(labels_fit, m);
allocate(labels_ref_fit, m);
allocate(pred_centroids, k * n);
allocate(centroids_ref, k * n);
// make testdata on host
T h_srcdata[n * m] =
{1.0,1.0,3.0,4.0, 1.0,2.0,2.0,3.0};
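// Column-major (ord = 'c') 4x2 data, i.e. the points (1,1), (1,2), (3,2), (4,3);
// expected clusters: {(3,2),(4,3)} -> centroid (3.5,2.5) and {(1,1),(1,2)} -> centroid (1,1.5).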
updateDevice(d_srcdata, h_srcdata, m*n);
// make and assign reference output
int h_labels_ref_fit[m] = {1, 1, 0, 0};
updateDevice(labels_ref_fit, h_labels_ref_fit, m);
T h_centroids_ref[k * n] = {3.5,2.5, 1.0,1.5};
updateDevice(centroids_ref, h_centroids_ref, k * n);
// The actual kmeans api calls
// fit
make_ptr_kmeans(0, verbose, seed, gpu_id, n_gpu, m, n,
ord, k, k, max_iterations,
init_from_data, params.tol, d_srcdata, nullptr, pred_centroids, labels_fit);
}
void SetUp() override {
basicTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(d_srcdata));
CUDA_CHECK(cudaFree(labels_fit));
CUDA_CHECK(cudaFree(pred_centroids));
CUDA_CHECK(cudaFree(labels_ref_fit));
CUDA_CHECK(cudaFree(centroids_ref));
}
protected:
KmeansInputs<T> params;
T *d_srcdata;
int *labels_fit, *labels_ref_fit;
T *pred_centroids, *centroids_ref;
int verbose = 0;
int seed = 1;
int gpu_id = 0;
int n_gpu = -1;
char ord = 'c'; // here c means col order, NOT C (vs F) order
int max_iterations = 300;
int init_from_data = 0;
};
const std::vector<KmeansInputs<float> > inputsf2 = {
{ 2, 0.05f, 4, 2 }};
const std::vector<KmeansInputs<double> > inputsd2 = {
{ 2, 0.05, 4, 2 }};
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Fit) {
ASSERT_TRUE(
devArrMatch(labels_ref_fit, labels_fit, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(centroids_ref, pred_centroids, params.n_clusters * params.n_col,
CompareApproxAbs<float>(params.tol)));
}
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Fit) {
ASSERT_TRUE(
devArrMatch(labels_ref_fit, labels_fit, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(centroids_ref, pred_centroids, params.n_clusters * params.n_col,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
15f3e072cc29eea944b576bec1078a65464ed6f1.hip | // !!! This is a file automatically generated by hipify!!!
//
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
// Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
#include "crop_cuda.h"
__global__ void gpu_crop_forward(const unsigned char* a_input, const ncnn::CudaMatInfo a_info, unsigned char* output, const ncnn::CudaMatInfo output_info) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
}
template<typename T>
__global__ void gpu_crop_copy_cut_border_image(const T* a_input, const ncnn::CudaMatInfo a_info,
T* output, const ncnn::CudaMatInfo output_info,
const int top, const int left, const int channel_offset = 0) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z ;
if ((row >= output_info.h)
|| (column >= output_info.w)
|| (channel >= output_info.c))
{
return;
}
const int input_row = row + top;
const int input_column = column + left;
const int input_channel = channel + channel_offset;
const int input_channel_step = input_channel * a_info.cstep * a_info.elemsize;
const int output_channel_step = channel * output_info.cstep * output_info.elemsize;
const T* ptr = (T*)((unsigned char*)a_input + input_channel_step + a_info.w * input_row * a_info.elemsize + input_column * a_info.elemsize);
T* out_ptr = (T*)((unsigned char*)output + output_channel_step + output_info.w * row * output_info.elemsize + column * output_info.elemsize);
*out_ptr = *ptr;
}
namespace ncnn {
int crop_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, const Crop_cuda::Crop_info crop_info)
{
int input_w = bottom_blob.w;
int input_h = bottom_blob.h;
int input_channels = bottom_blob.c;
int input_dims = bottom_blob.dims;
size_t input_elemsize = bottom_blob.elemsize;
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
ncnn::CudaMatInfo top_blob_info{top_blob};
std::shared_ptr<ncnn::CudaAllocator> cuda_allocator = ncnn::get_current_gpu_allocator();
if (input_dims == 1) {
if (crop_info.outw == input_w) {
top_blob = bottom_blob;
}
top_blob.create(crop_info.outw, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
else if (input_dims == 2)
{
if (crop_info.outw == input_w && crop_info.outh == input_h)
{
top_blob = bottom_blob;
return 0;
}
top_blob.create(crop_info.outw, crop_info.outh, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
else if (input_dims == 3)
{
if (crop_info.outw == input_w && crop_info.outh == input_h && crop_info.outc == input_channels)
{
top_blob = bottom_blob;
return 0;
}
top_blob.create(crop_info.outw, crop_info.outh, crop_info.outc, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
int thread_per_block_x = ((top_blob_info.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((top_blob_info.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = top_blob_info.c;
const int total_number_of_columns = top_blob_info.w;
const int total_number_of_rows = top_blob_info.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
if (input_elemsize == 1)
hipLaunchKernelGGL(( gpu_crop_copy_cut_border_image<signed char>), dim3(grid_size), dim3(block_size), 0, 0, static_cast<const signed char*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<signed char*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
if (input_elemsize == 2)
hipLaunchKernelGGL(( gpu_crop_copy_cut_border_image<unsigned short>), dim3(grid_size), dim3(block_size), 0, 0, static_cast<const unsigned short*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<unsigned short*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
if (input_elemsize == 4)
hipLaunchKernelGGL(( gpu_crop_copy_cut_border_image<float>), dim3(grid_size), dim3(block_size), 0, 0, static_cast<const float*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
return 0;
}
} | 15f3e072cc29eea944b576bec1078a65464ed6f1.cu | //
// Author: Marko Atanasievski
//
// Copyright (C) 2020 TANCOM SOFTWARE SOLUTIONS Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
// Parts of this file are originally copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include <iostream>
#include "mat.h"
#include "crop_cuda.h"
__global__ void gpu_crop_forward(const unsigned char* a_input, const ncnn::CudaMatInfo a_info, unsigned char* output, const ncnn::CudaMatInfo output_info) {
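// This kernel only performs a bounds check and then returns; the actual cropping copy is
// done by gpu_crop_copy_cut_border_image below.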
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (column >= a_info.w || row >= a_info.h || channel >= a_info.c)
{
return;
}
}
template<typename T>
__global__ void gpu_crop_copy_cut_border_image(const T* a_input, const ncnn::CudaMatInfo a_info,
T* output, const ncnn::CudaMatInfo output_info,
const int top, const int left, const int channel_offset = 0) {
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int channel = blockIdx.z * blockDim.z + threadIdx.z ;
if ((row >= output_info.h)
|| (column >= output_info.w)
|| (channel >= output_info.c))
{
return;
}
const int input_row = row + top;
const int input_column = column + left;
const int input_channel = channel + channel_offset;
const int input_channel_step = input_channel * a_info.cstep * a_info.elemsize;
const int output_channel_step = channel * output_info.cstep * output_info.elemsize;
const T* ptr = (T*)((unsigned char*)a_input + input_channel_step + a_info.w * input_row * a_info.elemsize + input_column * a_info.elemsize);
T* out_ptr = (T*)((unsigned char*)output + output_channel_step + output_info.w * row * output_info.elemsize + column * output_info.elemsize);
*out_ptr = *ptr;
}
namespace ncnn {
int crop_cuda_forward(const CudaMat& bottom_blob, CudaMat& top_blob, const Crop_cuda::Crop_info crop_info)
{
int input_w = bottom_blob.w;
int input_h = bottom_blob.h;
int input_channels = bottom_blob.c;
int input_dims = bottom_blob.dims;
size_t input_elemsize = bottom_blob.elemsize;
const ncnn::CudaMatInfo bottom_blob_info{bottom_blob};
ncnn::CudaMatInfo top_blob_info{top_blob};
std::shared_ptr<ncnn::CudaAllocator> cuda_allocator = ncnn::get_current_gpu_allocator();
if (input_dims == 1) {
if (crop_info.outw == input_w) {
top_blob = bottom_blob;
}
top_blob.create(crop_info.outw, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
else if (input_dims == 2)
{
if (crop_info.outw == input_w && crop_info.outh == input_h)
{
top_blob = bottom_blob;
return 0;
}
top_blob.create(crop_info.outw, crop_info.outh, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
else if (input_dims == 3)
{
if (crop_info.outw == input_w && crop_info.outh == input_h && crop_info.outc == input_channels)
{
top_blob = bottom_blob;
return 0;
}
top_blob.create(crop_info.outw, crop_info.outh, crop_info.outc, input_elemsize, cuda_allocator);
if (top_blob.empty())
return -100;
top_blob_info = ncnn::CudaMatInfo{top_blob};
}
int thread_per_block_x = ((top_blob_info.w - 1) / 64 + 1) * 64;
if (thread_per_block_x > 128) thread_per_block_x = 128;
int thread_per_block_y = ((top_blob_info.h - 1) / 8 + 1) * 8;
if (thread_per_block_y > 8) thread_per_block_y = 8;
const int thread_per_block_z = 1;
const int total_number_of_channels = top_blob_info.c;
const int total_number_of_columns = top_blob_info.w;
const int total_number_of_rows = top_blob_info.h;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
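// One thread per output element: block dims are rounded up (to multiples of 64 in x and 8 in y),
// capped at 128 x 8 x 1, and the grid covers the output width/height/channels via ceiling division.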
if (input_elemsize == 1)
gpu_crop_copy_cut_border_image<signed char><<<grid_size, block_size>>>(static_cast<const signed char*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<signed char*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
if (input_elemsize == 2)
gpu_crop_copy_cut_border_image<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<unsigned short*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
if (input_elemsize == 4)
gpu_crop_copy_cut_border_image<float><<<grid_size, block_size>>>(static_cast<const float*>(bottom_blob.get_craw_data()),
bottom_blob_info,
static_cast<float*>(top_blob.get_raw_data()),
top_blob_info,
crop_info.hoffset, crop_info.woffset,
(crop_info.coffset < 0 ? 0 : crop_info.coffset));
return 0;
}
} |
80fb091fc2a84c28794ed8603805e631acad7ce7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_diag.cu, normal z -> s, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "strtri.cuh"
/***************************************************************************//**
Purpose
-------
strtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA REAL array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA REAL array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag
*******************************************************************************/
extern "C" void
magmablas_strtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
hipMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(float) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
hipLaunchKernelGGL(( strtri_diag_lower_kernel)
, dim3(nblocks), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm16_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
hipLaunchKernelGGL(( strtri_diag_upper_kernel)
, dim3(nblocks), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm16_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
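/*
    Illustrative usage sketch (not part of the original file): the allocation size
    follows the d_dinvA documentation above, and magma_smalloc/magma_free are
    assumed to be the float allocation helpers in use.

        magmaFloat_ptr d_dinvA;
        magma_smalloc( &d_dinvA, magma_roundup( n, NB ) * NB );   // NB = 128
        magmablas_strtri_diag( MagmaLower, MagmaNonUnit, n, dA, ldda, d_dinvA, queue );
        ...
        magma_free( d_dinvA );
*/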
| 80fb091fc2a84c28794ed8603805e631acad7ce7.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_diag.cu, normal z -> s, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_NONBATCHED
#include "strtri.cuh"
/***************************************************************************//**
Purpose
-------
strtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA REAL array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA REAL array of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag
*******************************************************************************/
extern "C" void
magmablas_strtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_ptr d_dinvA,
magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
cudaMemset( d_dinvA, 0, magma_roundup( n, NB )*NB * sizeof(float) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
strtri_diag_lower_kernel
<<< nblocks, IB, 0, queue->cuda_stream() >>>
( diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_sgemm16_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm16_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_sgemm32_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm32_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_sgemm64_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm64_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_sgemm_above64_part1_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part2_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part3_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
strtri_diag_upper_kernel
<<< nblocks, IB, 0, queue->cuda_stream() >>>
( diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_sgemm16_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm16_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_sgemm32_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm32_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_sgemm64_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm64_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_sgemm_above64_part1_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part2_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part3_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
fe0537aefa4853d93d8443c27aba9503c266c3a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ __launch_bounds__(brick.x*brick.y*brick.z) void AMR_add(){
const int3 coord = make_int3(blockIdx*brick+threadIdx);
int cset = pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set;
if(cset==0) return;
Cell cm; cm.load_st(coord.x,coord.y,coord.z);
int3 coordp=coord;
int dist=0;
do{
coordp.x = (coordp.x+1)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordp.x,coordp.y,coordp.z)].set;
dist++;
} while (cset==0);
Cell cp; cp.load_st(coordp.x,coordp.y,coordp.z);
ftype vdif = cp.uT.x-cm.uT.x;
int3 coord_mid=coord; coord_mid.x=(coord_mid.x+dist/2)%pars.Nx;
if(vdif*vdif/min(cm.uT.w,cp.uT.w)>0.01 || fabs(1-cm.rho/cp.rho)>0.01) {
if(dist<=1) printf("limit of cells division exceeded\n");
if(dist%2==0) {
Cell c0;
c0.rho = 0.5*(cm.rho+cp.rho);
c0.uT = 0.5*(cm.uT+cp.uT);
for(int i=0; i<Qn; i++) c0.f[i] = 0.5*(cm.f[i]+cp.f[i]);
c0.save_12(coord_mid.x,coord_mid.y,coord_mid.z);
pars.cinfo[Cinfo::ind_zip(coord_mid.x,coord_mid.y,coord_mid.z)].setnew=1;
}
}
}
__global__ __launch_bounds__(1) void AMR_remove(){
for(int ix=0;ix<pars.Nx; ix++) {
const int3 coord = make_int3(ix,0,0);
int cset = pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set;
if(cset==0) continue;
Cell c0; c0.load_st(coord.x,coord.y,coord.z);
int3 coordp=coord, coordm=coord;
int distM=0,distP=0;
do{
coordm.x = (coordm.x-1+pars.Nx)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordm.x,coordm.y,coordm.z)].set;
distM++;
} while (cset==0);
do{
coordp.x = (coordp.x+1)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordp.x,coordp.y,coordp.z)].set;
distP++;
} while (cset==0);
Cell cm; cm.load_st(coordm.x,coordm.y,coordm.z);
Cell cp; cp.load_st(coordp.x,coordp.y,coordp.z);
ftype vdifm = cm.uT.x-c0.uT.x;
ftype vdifp = cp.uT.x-c0.uT.x;
if(vdifm*vdifm/c0.uT.w<0.01 && vdifp*vdifp/c0.uT.w<0.01 && fabs(cp.rho/c0.rho-1)<0.01 && fabs(cm.rho/c0.rho-1)<0.01 && (distM<4 && distP<4)) {
pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set=0;
pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set=0;
}
}
}
| fe0537aefa4853d93d8443c27aba9503c266c3a4.cu | __global__ __launch_bounds__(brick.x*brick.y*brick.z) void AMR_add(){
const int3 coord = make_int3(blockIdx*brick+threadIdx);
int cset = pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set;
if(cset==0) return;
Cell cm; cm.load_st(coord.x,coord.y,coord.z);
int3 coordp=coord;
int dist=0;
do{
coordp.x = (coordp.x+1)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordp.x,coordp.y,coordp.z)].set;
dist++;
} while (cset==0);
Cell cp; cp.load_st(coordp.x,coordp.y,coordp.z);
ftype vdif = cp.uT.x-cm.uT.x;
int3 coord_mid=coord; coord_mid.x=(coord_mid.x+dist/2)%pars.Nx;
if(vdif*vdif/min(cm.uT.w,cp.uT.w)>0.01 || fabs(1-cm.rho/cp.rho)>0.01) {
if(dist<=1) printf("limit of cells division exceeded\n");
if(dist%2==0) {
Cell c0;
c0.rho = 0.5*(cm.rho+cp.rho);
c0.uT = 0.5*(cm.uT+cp.uT);
for(int i=0; i<Qn; i++) c0.f[i] = 0.5*(cm.f[i]+cp.f[i]);
c0.save_12(coord_mid.x,coord_mid.y,coord_mid.z);
pars.cinfo[Cinfo::ind_zip(coord_mid.x,coord_mid.y,coord_mid.z)].setnew=1;
}
}
}
__global__ __launch_bounds__(1) void AMR_remove(){
for(int ix=0;ix<pars.Nx; ix++) {
const int3 coord = make_int3(ix,0,0);
int cset = pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set;
if(cset==0) continue;
Cell c0; c0.load_st(coord.x,coord.y,coord.z);
int3 coordp=coord, coordm=coord;
int distM=0,distP=0;
do{
coordm.x = (coordm.x-1+pars.Nx)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordm.x,coordm.y,coordm.z)].set;
distM++;
} while (cset==0);
do{
coordp.x = (coordp.x+1)%pars.Nx;
cset = pars.cinfo[Cinfo::ind_zip(coordp.x,coordp.y,coordp.z)].set;
distP++;
} while (cset==0);
Cell cm; cm.load_st(coordm.x,coordm.y,coordm.z);
Cell cp; cp.load_st(coordp.x,coordp.y,coordp.z);
ftype vdifm = cm.uT.x-c0.uT.x;
ftype vdifp = cp.uT.x-c0.uT.x;
if(vdifm*vdifm/c0.uT.w<0.01 && vdifp*vdifp/c0.uT.w<0.01 && fabs(cp.rho/c0.rho-1)<0.01 && fabs(cm.rho/c0.rho-1)<0.01 && (distM<4 && distP<4)) {
pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set=0;
pars.cinfo[Cinfo::ind_zip(coord.x,coord.y,coord.z)].set=0;
}
}
}
|
460afe01c5862d332345fdf141c79c55a6f1305f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/powc/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/elemwise_helper.cuh"
using namespace megdnn;
using namespace cuda;
#include <cmath>
#include <limits>
// use a namespace (but not anonymous namespace) to avoid name conflicts while
// maintaining readability of cuda kernel names
namespace cuda_kern {
template <int>
struct PowCIntSmall;
template <>
struct PowCIntSmall<0> {
template <typename T>
static __device__ __forceinline__ T apply(T) {
return static_cast<T>(1);
}
};
template <>
struct PowCIntSmall<1> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x;
}
};
template <>
struct PowCIntSmall<2> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x * x;
}
};
template <>
struct PowCIntSmall<3> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x * x * x;
}
};
template <>
struct PowCIntSmall<4> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
x = x * x;
return x * x;
}
};
template <int n>
struct PowCIntSmall {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return PowCIntSmall<-n>::apply(static_cast<T>(1) / x);
}
};
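// Example of the recursion above: PowCIntSmall<-2>::apply(x) forwards to
// PowCIntSmall<2>::apply(1/x), i.e. it computes (1/x)*(1/x). Only the
// specializations for 0..4 terminate the recursion, so dispatch_op below only
// instantiates this generic template for exponents in {-1, -2, -3, -4}.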
template <typename T>
struct PowCIntOdd {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(copysignf(powf(fabsf(x), exp), x));
}
};
template <typename T>
struct PowCIntEven {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(powf(fabsf(x), exp));
}
};
struct PowCFloatSqrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(sqrtf(x));
}
};
struct PowCFloatCbrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(cbrtf(x));
}
};
struct PowCFloatRSqrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(rsqrtf(x));
}
};
struct PowCFloatRCbrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(rcbrtf(x));
}
};
template <typename T>
struct PowCFloat {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(powf(x, exp));
}
};
template <typename T, typename PowOp>
struct PowCOp {
T* dest;
PowOp pow_op;
__device__ __forceinline__ void operator()(uint32_t idx, T src) {
dest[idx] = pow_op.apply(src);
}
};
} // namespace cuda_kern
using namespace cuda_kern;
namespace {
template <typename T, typename PowOp>
void invoke(const TensorND& dest, const TensorND& src, PowOp pow_op,
hipStream_t stream) {
ElemwiseOpParamN<1> param;
param[0] = src;
param.init_from_given_tensor();
typedef PowCOp<T, PowOp> Op;
Op op;
op.dest = dest.ptr<T>();
op.pow_op = pow_op;
run_elemwise<Op, T, 1>(param, stream, op);
}
bool feq(float a, float b) {
return std::abs(a - b) < std::numeric_limits<float>::epsilon();
}
template <typename T>
void dispatch_op(const TensorND& dest, const TensorND& src, const float* exp_f,
const int* exp_i, hipStream_t stream) {
#define CALL(_op) invoke<T>(dest, src, _op, stream)
if (exp_f) {
float exp = *exp_f;
#define CALL_IF(_v, _op) \
do { \
if (feq(exp, _v)) { \
CALL(_op); \
return; \
} \
} while (0)
CALL_IF(.5f, PowCFloatSqrt());
CALL_IF(1.f / 3.f, PowCFloatCbrt());
CALL_IF(-.5f, PowCFloatRSqrt());
CALL_IF(-1.f / 3.f, PowCFloatRCbrt());
PowCFloat<T> op;
op.exp = exp;
CALL(op);
return;
#undef CALL_IF
}
int exp = *exp_i;
switch (exp) {
#define CASE(v) \
case v: \
CALL(PowCIntSmall<v>()); \
return
CASE(0);
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(-1);
CASE(-2);
CASE(-3);
CASE(-4);
#undef CASE
}
if (exp & 1) {
PowCIntOdd<T> op;
op.exp = exp;
CALL(op);
} else {
PowCIntEven<T> op;
op.exp = exp;
CALL(op);
}
#undef CALL
}
} // anonymous namespace
void cuda::powc_kern(const TensorND& dest, const TensorND& src,
const float* exp_f, const int* exp_i,
hipStream_t stream) {
switch (src.layout.dtype.enumv().ev) {
#define cb(dt) \
case DTypeTrait<dt>::enumv: \
return dispatch_op<DTypeTrait<dt>::ctype>(dest, src, exp_f, exp_i, \
stream);
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb)
#undef cb
default:
megdnn_throw("unsupported dtype for PowC");
}
}
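/*
    Illustrative call sketch (not from the original source). It assumes `dest` and
    `src` are float32 TensorNDs with identical layouts already resident on the
    device and `stream` is a valid hipStream_t:

        float exp = 2.5f;
        cuda::powc_kern(dest, src, &exp, nullptr, stream);    // constant float power

        int iexp = -3;
        cuda::powc_kern(dest, src, nullptr, &iexp, stream);   // constant integer power
*/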
// vim: syntax=cpp.doxygen
| 460afe01c5862d332345fdf141c79c55a6f1305f.cu | /**
* \file dnn/src/cuda/powc/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./kern.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/elemwise_helper.cuh"
using namespace megdnn;
using namespace cuda;
#include <cmath>
#include <limits>
// use a namespace (but not anonymous namespace) to avoid name conflicts while
// maintaining readability of cuda kernel names
namespace cuda_kern {
template <int>
struct PowCIntSmall;
template <>
struct PowCIntSmall<0> {
template <typename T>
static __device__ __forceinline__ T apply(T) {
return static_cast<T>(1);
}
};
template <>
struct PowCIntSmall<1> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x;
}
};
template <>
struct PowCIntSmall<2> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x * x;
}
};
template <>
struct PowCIntSmall<3> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return x * x * x;
}
};
template <>
struct PowCIntSmall<4> {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
x = x * x;
return x * x;
}
};
template <int n>
struct PowCIntSmall {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return PowCIntSmall<-n>::apply(static_cast<T>(1) / x);
}
};
template <typename T>
struct PowCIntOdd {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(copysignf(powf(fabsf(x), exp), x));
}
};
template <typename T>
struct PowCIntEven {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(powf(fabsf(x), exp));
}
};
struct PowCFloatSqrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(sqrtf(x));
}
};
struct PowCFloatCbrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(cbrtf(x));
}
};
struct PowCFloatRSqrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(rsqrtf(x));
}
};
struct PowCFloatRCbrt {
template <typename T>
static __device__ __forceinline__ T apply(T x) {
return static_cast<T>(rcbrtf(x));
}
};
template <typename T>
struct PowCFloat {
T exp;
__device__ __forceinline__ T apply(T x) {
return static_cast<T>(powf(x, exp));
}
};
template <typename T, typename PowOp>
struct PowCOp {
T* dest;
PowOp pow_op;
__device__ __forceinline__ void operator()(uint32_t idx, T src) {
dest[idx] = pow_op.apply(src);
}
};
} // namespace cuda_kern
using namespace cuda_kern;
namespace {
template <typename T, typename PowOp>
void invoke(const TensorND& dest, const TensorND& src, PowOp pow_op,
cudaStream_t stream) {
ElemwiseOpParamN<1> param;
param[0] = src;
param.init_from_given_tensor();
typedef PowCOp<T, PowOp> Op;
Op op;
op.dest = dest.ptr<T>();
op.pow_op = pow_op;
run_elemwise<Op, T, 1>(param, stream, op);
}
bool feq(float a, float b) {
return std::abs(a - b) < std::numeric_limits<float>::epsilon();
}
template <typename T>
void dispatch_op(const TensorND& dest, const TensorND& src, const float* exp_f,
const int* exp_i, cudaStream_t stream) {
#define CALL(_op) invoke<T>(dest, src, _op, stream)
if (exp_f) {
float exp = *exp_f;
#define CALL_IF(_v, _op) \
do { \
if (feq(exp, _v)) { \
CALL(_op); \
return; \
} \
} while (0)
CALL_IF(.5f, PowCFloatSqrt());
CALL_IF(1.f / 3.f, PowCFloatCbrt());
CALL_IF(-.5f, PowCFloatRSqrt());
CALL_IF(-1.f / 3.f, PowCFloatRCbrt());
PowCFloat<T> op;
op.exp = exp;
CALL(op);
return;
#undef CALL_IF
}
int exp = *exp_i;
switch (exp) {
#define CASE(v) \
case v: \
CALL(PowCIntSmall<v>()); \
return
CASE(0);
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(-1);
CASE(-2);
CASE(-3);
CASE(-4);
#undef CASE
}
if (exp & 1) {
PowCIntOdd<T> op;
op.exp = exp;
CALL(op);
} else {
PowCIntEven<T> op;
op.exp = exp;
CALL(op);
}
#undef CALL
}
} // anonymous namespace
void cuda::powc_kern(const TensorND& dest, const TensorND& src,
const float* exp_f, const int* exp_i,
cudaStream_t stream) {
switch (src.layout.dtype.enumv().ev) {
#define cb(dt) \
case DTypeTrait<dt>::enumv: \
return dispatch_op<DTypeTrait<dt>::ctype>(dest, src, exp_f, exp_i, \
stream);
MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb)
#undef cb
default:
megdnn_throw("unsupported dtype for PowC");
}
}
// vim: syntax=cpp.doxygen
|
7577af2507a7f872a3be4214f14806c39785e02d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ker_dense_to_sparse_subtract.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
const unsigned int *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *trg = NULL;
hipMalloc(&trg, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
ker_dense_to_sparse_subtract), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,src,trg);
hipDeviceSynchronize();
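// ten untimed warm-up launches; the loop below then times 1000 steady-state launches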
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ker_dense_to_sparse_subtract), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,src,trg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ker_dense_to_sparse_subtract), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,src,trg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7577af2507a7f872a3be4214f14806c39785e02d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ker_dense_to_sparse_subtract.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
const unsigned int *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *trg = NULL;
cudaMalloc(&trg, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ker_dense_to_sparse_subtract<<<gridBlock,threadBlock>>>(n,idx,src,trg);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ker_dense_to_sparse_subtract<<<gridBlock,threadBlock>>>(n,idx,src,trg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ker_dense_to_sparse_subtract<<<gridBlock,threadBlock>>>(n,idx,src,trg);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
53f295b65f540a52ce9eb79c475be43d638f93bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** maximum X per block (used in dimensions for blocks and amount of shared memory */
#define SHARED_X ${SHARED_X}
/** maximum Y per block (used in dimensions for blocks and amount of shared memory */
#define SHARED_Y ${SHARED_Y}
/** kernel contains a for-loop in which the score is calculated. */
#define DIAGONAL SHARED_X + SHARED_Y
/** amount of score elements in a single block */
#define blockSize (SHARED_X * SHARED_Y)
/** amount of blocks across the X axis */
#define XdivSHARED_X (X/SHARED_X)
/** amount of blocks across the Y axis */
#define YdivSHARED_Y (Y/SHARED_Y)
/** character used to fill the sequence if length < X */
#define FILL_CHARACTER '\0'
#define FILL_SCORE -1E10f
/** this value is used to allocate enough memory to store the starting points */
#define MAXIMUM_NUMBER_STARTING_POINTS (NUMBER_SEQUENCES*NUMBER_TARGETS*1000)
/**** Other definitions ****/
/** bit mask to get the negative value of a float, or to keep it negative */
#define SIGN_BIT_MASK 0x80000000
#define MAX_LINE_LENGTH 500
#define AFFINE_GAP_INIT -1E10f
/* Scorings matrix for each thread block */
typedef struct {
float value[SHARED_X][SHARED_Y];
} LocalMatrix;
/* Scorings matrix for each sequence alignment */
typedef struct {
LocalMatrix matrix[XdivSHARED_X][YdivSHARED_Y];
} ScoringsMatrix;
/* Scorings matrix for entire application */
typedef struct {
ScoringsMatrix metaMatrix[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalMatrix;
typedef struct {
float value[XdivSHARED_X][YdivSHARED_Y];
} BlockMaxima;
typedef struct {
BlockMaxima blockMaxima[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalMaxima;
typedef struct {
unsigned char value[SHARED_X][SHARED_Y];
} LocalDirection;
typedef struct {
LocalDirection localDirection[XdivSHARED_X][YdivSHARED_Y];
} Direction;
typedef struct {
Direction direction[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalDirection;
typedef struct {
unsigned int sequence;
unsigned int target;
unsigned int blockX;
unsigned int blockY;
unsigned int valueX;
unsigned int valueY;
float score;
float maxScore;
float posScore;
} StartingPoint;
typedef struct {
StartingPoint startingPoint[MAXIMUM_NUMBER_STARTING_POINTS];
} StartingPoints;
typedef struct {
float value[NUMBER_TARGETS];
} TargetMaxima;
typedef struct {
float value[NUMBER_SEQUENCES];
} SequenceMaxima;
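/* Size note: sizeof(GlobalMatrix) equals NUMBER_SEQUENCES * NUMBER_TARGETS * X * Y * sizeof(float),
   so the sequence/target counts and the padded lengths X and Y directly bound the device memory
   footprint (the affine-gap path keeps three such matrices plus the direction matrix alive). */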
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <builtin_types.h>
#include <time.h>
/**
* The calculateScore function checks the alignment per block. It calculates the score for each cell in
* shared memory.
* @matrix The scorings matrix
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
extern "C"
__global__ void calculateScore(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, char *sequences, char *targets, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection);
extern "C"
__global__ void calculateScoreAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j, unsigned int x, unsigned int y, unsigned int numberOfBlocks, char *sequences, char *targets, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection);
extern "C"
__global__ void traceback(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore);
extern "C"
__global__ void tracebackAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore);
/**
* The calculateScore function checks the alignment per block. It calculates the score for each cell in
* shared memory
* @matrix The scorings matrix
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
__global__ void calculateScore(
GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks,
char *sequences, char *targets,
GlobalMaxima *globalMaxima,
GlobalDirection *globalDirection
) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
/**
* shared memory block for storing the maximum value of each neighboring cell.
* Careful: the s_maxima[SHARED_X][SHARED_Y] does not contain the maximum value
* after the calculation loop! This value is determined at the end of this
* function.
*/
__shared__ float s_maxima[SHARED_X][SHARED_Y];
// calculate indices:
//unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_SEQUENCES);
unsigned int blockx = x - blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int blocky = y + blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;///numberOfBlocks;
unsigned char direction = NO_DIRECTION;
// indices of the current characters in both sequences.
int seqIndex1 = tIDx + bIDx * X + blockx * SHARED_X;
int seqIndex2 = tIDy + bIDy * Y + blocky * SHARED_Y;
/* the next block is to get the maximum value from surrounding blocks. This maximum values is compared to the
* first element in the shared score matrix s_matrix.
*/
float maxPrev = 0.0f;
if (!tIDx && !tIDy) {
if (blockx && blocky) {
maxPrev = max(max(globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky-1], globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky]), globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1]);
}
else if (blockx) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky];
}
else if (blocky) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1];
}
}
// local scorings variables:
float currentScore, ulS, lS, uS;
float innerScore = 0.0f;
/**
* tXM1 and tYM1 are to store the current value of the thread Index. tIDx and tIDy are
* both increased with 1 later on.
*/
unsigned int tXM1 = tIDx;
unsigned int tYM1 = tIDy;
// shared location for the parts of the 2 sequences, for faster retrieval later on:
__shared__ char s_seq1[SHARED_X];
__shared__ char s_seq2[SHARED_Y];
// copy sequence data to shared memory (shared is much faster than global)
if (!tIDy)
s_seq1[tIDx] = sequences[seqIndex1];
if (!tIDx)
s_seq2[tIDy] = targets[seqIndex2];
// set both matrices to zero
s_matrix[tIDx][tIDy] = 0.0f;
s_maxima[tIDx][tIDy] = 0.0f;
if (tIDx == SHARED_X-1 && ! tIDy)
s_matrix[SHARED_X][0] = 0.0f;
if (tIDy == SHARED_Y-1 && ! tIDx)
s_matrix[0][SHARED_Y] = 0.0f;
/**** sync barrier ****/
s_matrix[tIDx][tIDy] = 0.0f;
__syncthreads();
// initialize outer parts of the matrix:
if (!tIDx || !tIDy) {
if (tIDx == SHARED_X-1)
s_matrix[tIDx+1][tIDy] = 0.0f;
if (tIDy == SHARED_Y-1)
s_matrix[tIDx][tIDy+1] = 0.0f;
if (blockx && !tIDx) {
s_matrix[0][tIDy+1] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
}
if (blocky && !tIDy) {
s_matrix[tIDx+1][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
}
if (blockx && blocky && !tIDx && !tIDy){
s_matrix[0][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
}
}
// set inner score (aka sequence match/mismatch score):
char charS1 = s_seq1[tIDx];
char charS2 = s_seq2[tIDy];
innerScore = charS1 == FILL_CHARACTER || charS2 == FILL_CHARACTER ? FILL_SCORE : scoringsMatrix[charS1-characterOffset][charS2-characterOffset];
// transpose the index
++tIDx;
++tIDy;
// set shared matrix to zero (starting point!)
s_matrix[tIDx][tIDy] = 0.0f;
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
currentScore = 0.0f;
for (int i=0; i < DIAGONAL; ++i) {
if (i == tXM1+ tYM1) {
// calculate only when there are two valid characters
// this is necessary when the two sequences are not of equal length
// this is the SW-scoring of the cell:
ulS = s_matrix[tXM1][tYM1] + innerScore;
lS = s_matrix[tXM1][tIDy] + gapScore;
uS = s_matrix[tIDx][tYM1] + gapScore;
if (currentScore < lS) { // score comes from left
currentScore = lS;
direction = LEFT_DIRECTION;
}
if (currentScore < uS) { // score comes from above
currentScore = uS;
direction = UPPER_DIRECTION;
}
if (currentScore < ulS) { // score comes from upper left
currentScore = ulS;
direction = UPPER_LEFT_DIRECTION;
}
s_matrix[tIDx][tIDy] = innerScore == FILL_SCORE ? 0.0 : currentScore; // copy score to matrix
}
else if (i-1 == tXM1 + tYM1 ){
// use this to find max
if (i==1) {
s_maxima[0][0] = max(maxPrev, currentScore);
}
else if (!tXM1 && tYM1) {
s_maxima[0][tYM1] = max(s_maxima[0][tYM1-1], currentScore);
}
else if (!tYM1 && tXM1) {
s_maxima[tXM1][0] = max(s_maxima[tXM1-1][0], currentScore);
}
else if (tXM1 && tYM1 ){
s_maxima[tXM1][tYM1] = max(s_maxima[tXM1-1][tYM1], max(s_maxima[tXM1][tYM1-1], currentScore));
}
}
// wait until all threads have calculated their new score
/**** sync barrier ****/
__syncthreads();
}
// copy end score to the scorings matrix:
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix[tIDx][tIDy];
(*globalDirection).direction[bIDx][bIDy].localDirection[blockx][blocky].value[tXM1][tYM1] = direction;
if (tIDx==SHARED_X && tIDy==SHARED_Y)
globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky] = max(currentScore, max(s_maxima[SHARED_X-2][SHARED_Y-1], s_maxima[SHARED_X-1][SHARED_Y-2]));
// wait until all threads have copied their score:
/**** sync barrier ****/
__syncthreads();
}
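/*
    Sketch of one wavefront launch, inferred from the index arithmetic above (the
    real host driver is not part of this file, so the variable names and the
    diagonal bookkeeping here are assumptions):

        dim3 block(SHARED_X, SHARED_Y);
        dim3 grid(NUMBER_SEQUENCES, NUMBER_TARGETS * numberOfBlocks);
        hipLaunchKernelGGL(calculateScore, grid, block, 0, 0,
                           d_matrix, x, y, numberOfBlocks,
                           d_sequences, d_targets, d_globalMaxima, d_globalDirection);

    Here x and y select the current block anti-diagonal of the score matrix and
    numberOfBlocks is the number of (blockx, blocky) tiles on that diagonal; the
    kernel is launched once per diagonal so that each tile's left and upper
    neighbours are already complete.
*/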
/**
 * The calculateScoreAffineGap function checks the alignment per block. It calculates the score for each cell in
* shared memory using the affine gap penalty score
* @matrix The scorings matrix
* @matrix_i The scorings matrix, gaps in X
* @matrix_j The scorings matrix, gaps in Y
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
__global__ void calculateScoreAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j,
unsigned int x, unsigned int y, unsigned int numberOfBlocks,
char *sequences, char *targets,
GlobalMaxima *globalMaxima,
GlobalDirection *globalDirection
) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_i[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_j[SHARED_X+1][SHARED_Y+1];
/**
* shared memory block for storing the maximum value of each neighboring cell.
* Careful: the s_maxima[SHARED_X][SHARED_Y] does not contain the maximum value
* after the calculation loop! This value is determined at the end of this
* function.
*/
__shared__ float s_maxima[SHARED_X][SHARED_Y];
// calculate indices:
//unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_SEQUENCES);
unsigned int blockx = x - blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int blocky = y + blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;///numberOfBlocks;
unsigned char direction = NO_DIRECTION;
unsigned char direction_i = NO_DIRECTION;
unsigned char direction_j = NO_DIRECTION;
// indices of the current characters in both sequences.
int seqIndex1 = tIDx + bIDx * X + blockx * SHARED_X;
int seqIndex2 = tIDy + bIDy * Y + blocky * SHARED_Y;
/* the next block is to get the maximum value from surrounding blocks. This maximum values is compared to the
* first element in the shared score matrix s_matrix.
*/
float maxPrev = 0.0f;
if (!tIDx && !tIDy) {
if (blockx && blocky) {
maxPrev = max(max(globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky-1], globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky]), globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1]);
}
else if (blockx) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky];
}
else if (blocky) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1];
}
}
// local scorings variables:
float currentScore,currentScore_i, currentScore_j, m_M, m_I, m_J;
float innerScore = 0.0f;
/**
* tXM1 and tYM1 are to store the current value of the thread Index. tIDx and tIDy are
* both increased with 1 later on.
*/
unsigned int tXM1 = tIDx;
unsigned int tYM1 = tIDy;
// shared location for the parts of the 2 sequences, for faster retrieval later on:
__shared__ char s_seq1[SHARED_X];
__shared__ char s_seq2[SHARED_Y];
// copy sequence data to shared memory (shared is much faster than global)
if (!tIDy)
s_seq1[tIDx] = sequences[seqIndex1];
if (!tIDx)
s_seq2[tIDy] = targets[seqIndex2];
// set matrices to zero
s_matrix[tIDx][tIDy] = 0.0f;
s_matrix_i[tIDx][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy] = AFFINE_GAP_INIT;
s_maxima[tIDx][tIDy] = 0.0f;
if (tIDx == SHARED_X-1 && ! tIDy) {
s_matrix[SHARED_X][0] = 0.0f;
s_matrix_i[SHARED_X][0] = AFFINE_GAP_INIT;
s_matrix_j[SHARED_X][0] = AFFINE_GAP_INIT;
}
if (tIDy == SHARED_Y-1 && ! tIDx) {
s_matrix[0][SHARED_Y] = 0.0f;
s_matrix_i[0][SHARED_Y] = AFFINE_GAP_INIT;
s_matrix_j[0][SHARED_Y] = AFFINE_GAP_INIT;
}
/**** sync barrier ****/
__syncthreads();
// initialize outer parts of the matrix:
if (!tIDx || !tIDy) {
if (tIDx == SHARED_X-1) {
s_matrix[tIDx+1][tIDy] = 0.0f;
s_matrix_i[tIDx+1][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx+1][tIDy] = AFFINE_GAP_INIT;
}
if (tIDy == SHARED_Y-1) {
s_matrix[tIDx][tIDy+1] = 0.0f;
s_matrix_i[tIDx][tIDy+1] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy+1] = AFFINE_GAP_INIT;
}
if (blockx && !tIDx) {
s_matrix[0][tIDy+1] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
s_matrix_i[0][tIDy+1] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
s_matrix_j[0][tIDy+1] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
}
if (blocky && !tIDy) {
s_matrix[tIDx+1][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
s_matrix_i[tIDx+1][0] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
s_matrix_j[tIDx+1][0] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
}
if (blockx && blocky && !tIDx && !tIDy){
s_matrix[0][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
s_matrix_i[0][0] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
s_matrix_j[0][0] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
}
}
// set inner score (aka sequence match/mismatch score):
char charS1 = s_seq1[tIDx];
char charS2 = s_seq2[tIDy];
innerScore = charS1 == FILL_CHARACTER || charS2 == FILL_CHARACTER ? FILL_SCORE : scoringsMatrix[charS1-characterOffset][charS2-characterOffset];
// transpose the index
++tIDx;
++tIDy;
// set shared matrix to zero (starting point!)
s_matrix[tIDx][tIDy] = 0.0f;
s_matrix_i[tIDx][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy] = AFFINE_GAP_INIT;
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
currentScore = 0.0f;
for (int i=0; i < DIAGONAL; ++i) {
if (i == tXM1+ tYM1) {
// calculate only when there are two valid characters
// this is necessary when the two sequences are not of equal length
// this is the SW-scoring of the cell:
m_M = s_matrix[tXM1][tYM1]+innerScore;
m_I = s_matrix_i[tXM1][tYM1]+innerScore;
m_J = s_matrix_j[tXM1][tYM1]+innerScore;
if (currentScore < m_I) { // score comes from I matrix (gap in x)
currentScore = m_I;
direction = A_DIRECTION | MAIN_MATRIX;
}
if (currentScore < m_J) { // score comes from J matrix (gap in y)
currentScore = m_J;
direction = A_DIRECTION | MAIN_MATRIX;
}
if (currentScore < m_M) { // score comes from m matrix (match)
currentScore = m_M;
direction = A_DIRECTION | MAIN_MATRIX;
}
s_matrix[tIDx][tIDy] = innerScore == FILL_SCORE ? 0.0 : currentScore; // copy score to matrix
// now do I matrix:
currentScore_i = AFFINE_GAP_INIT;
m_M = gapScore + gapExtension + s_matrix[tIDx][tYM1];
m_I = gapExtension + s_matrix_i[tIDx][tYM1];
if (currentScore_i < m_I) { // score comes from I matrix (gap in x)
currentScore_i = m_I;
direction_i = B_DIRECTION | I_MATRIX;
}
if (currentScore_i < m_M) { // score comes from m matrix (match)
currentScore_i = m_M;
direction_i= B_DIRECTION | I_MATRIX;
}
s_matrix_i[tIDx][tIDy] = currentScore_i < 0 ? AFFINE_GAP_INIT : currentScore_i; // copy score to matrix
// now do J matrix:
currentScore_j = AFFINE_GAP_INIT;
m_M = gapScore + gapExtension + s_matrix[tXM1][tIDy];
m_J = gapExtension + s_matrix_j[tXM1][tIDy];
if (currentScore_j < m_J) { // score comes from J matrix (gap in y)
currentScore_j = m_J;
direction_j = C_DIRECTION | J_MATRIX;
}
if (currentScore_j < m_M) { // score comes from m matrix (match)
currentScore_j = m_M;
direction_j = C_DIRECTION | J_MATRIX;
}
s_matrix_j[tIDx][tIDy] = currentScore_j < 0 ? AFFINE_GAP_INIT : currentScore_j; // copy score to matrix
currentScore = fmax(currentScore,fmax(currentScore_i,currentScore_j));
if (currentScore > 0) {
if (currentScore == s_matrix[tIDx][tIDy]) {// direction from main
direction = direction;
}
else if(currentScore == s_matrix_i[tIDx][tIDy]) {// direction from I
direction = direction_i;
}
else if(currentScore == s_matrix_j[tIDx][tIDy]){ // direction from J
direction = direction_j;
}
}
}
else if (i-1 == tXM1 + tYM1 ){
// use this to find max
if (i==1) {
s_maxima[0][0] = max(maxPrev, currentScore);
}
else if (!tXM1 && tYM1) {
s_maxima[0][tYM1] = max(s_maxima[0][tYM1-1], currentScore);
}
else if (!tYM1 && tXM1) {
s_maxima[tXM1][0] = max(s_maxima[tXM1-1][0], currentScore);
}
else if (tXM1 && tYM1 ){
s_maxima[tXM1][tYM1] = max(s_maxima[tXM1-1][tYM1], max(s_maxima[tXM1][tYM1-1], currentScore));
}
}
// wait until all threads have calculated their new score
/**** sync barrier ****/
__syncthreads();
}
// copy end score to the scorings matrix:
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix[tIDx][tIDy];
(*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix_i[tIDx][tIDy];
(*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix_j[tIDx][tIDy];
(*globalDirection).direction[bIDx][bIDy].localDirection[blockx][blocky].value[tXM1][tYM1] = direction;
if (tIDx==SHARED_X && tIDy==SHARED_Y)
globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky] = max(currentScore, max(s_maxima[SHARED_X-2][SHARED_Y-1], s_maxima[SHARED_X-1][SHARED_Y-2]));
// wait until all threads have copied their score:
/**** sync barrier ****/
__syncthreads();
}
__global__ void traceback(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
/**
* shared memory for storing the maximum value of this alignment.
*/
__shared__ float s_maxima[1];
__shared__ float s_maxPossibleScore[1];
// calculate indices:
unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_TARGETS);
unsigned int blockx = x - yDIVnumSeq;
unsigned int blocky = y + yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
float value;
if (!tIDx && !tIDy) {
s_maxima[0] = globalMaxima->blockMaxima[bIDx][bIDy].value[XdivSHARED_X-1][YdivSHARED_Y-1];
s_maxPossibleScore[0] = maxPossibleScore[bIDy*NUMBER_SEQUENCES+bIDx];
}
__syncthreads();
if (s_maxima[0]>= MINIMUM_SCORE) { // if the maximum score is below threshold, there is nothing to do
s_matrix[tIDx][tIDy] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
unsigned char direction = globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
for (int i=DIAGONAL-1; i >= 0; --i) {
if ((i == tIDx + tIDy) && direction == UPPER_LEFT_DIRECTION && s_matrix[tIDx][tIDy] >= LOWER_LIMIT_SCORE * s_maxima[0] && s_matrix[tIDx][tIDy] >= s_maxPossibleScore[0]) {
// found starting point!
// reserve index:
unsigned int index = atomicAdd(indexIncrement, 1);
// now copy this to host:
StartingPoint *start = &(startingPoints->startingPoint[index]);
start->sequence = bIDx;
start->target = bIDy;
start->blockX = blockx;
start->blockY = blocky;
start->valueX = tIDx;
start->valueY = tIDy;
start->score = s_matrix[tIDx][tIDy];
start->maxScore = s_maxima[0];
start->posScore = s_maxPossibleScore[0];
// startingPoints->startingPoint[index] = start;
// mark this value:
s_matrix[tIDx][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(s_matrix[tIDx][tIDy]));
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == UPPER_LEFT_DIRECTION) {
if (tIDx && tIDy){
value = s_matrix[tIDx-1][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && tIDy && blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && !tIDy && blockx && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (tIDx && !tIDy && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == UPPER_DIRECTION) {
if (!tIDy) {
if (blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == LEFT_DIRECTION) {
if (!tIDx){
if (blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx-1][tIDy];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
}
// copy end score to the scorings matrix:
if (s_matrix[tIDx][tIDy] < 0) {
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
/**** sync barrier ****/
__syncthreads();
}
}
__device__ unsigned char tracebackStepLeftUp(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (tIDx && tIDy){
value = s_matrix[tIDx-1][tIDy-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && tIDy && blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && !tIDy && blockx && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (tIDx && !tIDy && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__device__ unsigned char tracebackStepUp(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (!tIDy) {
if (blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx][tIDy-1];
s_matrix[tIDx][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__device__ unsigned char tracebackStepLeft(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (!tIDx){
if (blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx-1][tIDy];
s_matrix[tIDx-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__global__ void tracebackAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j,unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore){
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_i[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_j[SHARED_X+1][SHARED_Y+1];
/**
* shared memory for storing the maximum value of this alignment.
*/
__shared__ float s_maxima[1];
__shared__ float s_maxPossibleScore[1];
// calculate indices:
unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_TARGETS);
unsigned int blockx = x - yDIVnumSeq;
unsigned int blocky = y + yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
if (!tIDx && !tIDy) {
s_maxima[0] = globalMaxima->blockMaxima[bIDx][bIDy].value[XdivSHARED_X-1][YdivSHARED_Y-1];
s_maxPossibleScore[0] = maxPossibleScore[bIDy*NUMBER_SEQUENCES+bIDx];
}
__syncthreads();
if (s_maxima[0]>= MINIMUM_SCORE) { // if the maximum score is below threshold, there is nothing to do
unsigned char direction = DIRECTION_MASK & globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
unsigned char matrix_source = MATRIX_MASK & globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
s_matrix[tIDx][tIDy] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
s_matrix_i[tIDx][tIDy] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
s_matrix_j[tIDx][tIDy] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
for (int i=DIAGONAL-1; i >= 0; --i) {
if ((i == tIDx + tIDy) && matrix_source == MAIN_MATRIX && s_matrix[tIDx][tIDy] >= LOWER_LIMIT_SCORE * s_maxima[0] && s_matrix[tIDx][tIDy] >= s_maxPossibleScore[0]) {
// found starting point!
// reserve index:
unsigned int index = atomicAdd(indexIncrement, 1);
// now copy this to host:
StartingPoint *start = &(startingPoints->startingPoint[index]);
start->sequence = bIDx;
start->target = bIDy;
start->blockX = blockx;
start->blockY = blocky;
start->valueX = tIDx;
start->valueY = tIDy;
start->score = s_matrix[tIDx][tIDy];
start->maxScore = s_maxima[0];
start->posScore = s_maxPossibleScore[0];
// startingPoints->startingPoint[index] = start;
// mark this value:
s_matrix[tIDx][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(s_matrix[tIDx][tIDy]));
}
__syncthreads();
if ((i == tIDx + tIDy) && (
(s_matrix[tIDx][tIDy] < 0 && matrix_source == MAIN_MATRIX) ||
(s_matrix_i[tIDx][tIDy] < 0 && s_matrix_i[tIDx][tIDy] > AFFINE_GAP_INIT && matrix_source == I_MATRIX) ||
(s_matrix_j[tIDx][tIDy] < 0 && s_matrix_j[tIDx][tIDy] > AFFINE_GAP_INIT && matrix_source == J_MATRIX)
)) {
// check which matrix to go to:
switch (direction) {
case A_DIRECTION : // M
direction = tracebackStepLeftUp(blockx, blocky, s_matrix, matrix, direction);
break;
case B_DIRECTION : // I
direction = tracebackStepUp(blockx, blocky, s_matrix_i, matrix_i, direction);
break;
case C_DIRECTION : // J
direction = tracebackStepLeft(blockx, blocky, s_matrix_j, matrix_j, direction);
break;
}
}
__syncthreads();
}
if (matrix_source == MAIN_MATRIX) {
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
else if (matrix_source == I_MATRIX) {
(*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix_i[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
else if (matrix_source == J_MATRIX) {
(*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix_j[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
/**** sync barrier ****/
__syncthreads();
}
}
| 53f295b65f540a52ce9eb79c475be43d638f93bd.cu | /** maximum X per block (used in dimensions for blocks and amount of shared memory) */
#define SHARED_X ${SHARED_X}
/** maximum Y per block (used in dimensions for blocks and amount of shared memory) */
#define SHARED_Y ${SHARED_Y}
/** the kernels loop over the anti-diagonals of a block; DIAGONAL is the number of iterations of that score-calculation for-loop. */
#define DIAGONAL SHARED_X + SHARED_Y
/** amount of score elements in a single block */
#define blockSize (SHARED_X * SHARED_Y)
/** amount of blocks across the X axis */
#define XdivSHARED_X (X/SHARED_X)
/** amount of blocks across the Y axis */
#define YdivSHARED_Y (Y/SHARED_Y)
/** character used to fill the sequence if length < X */
#define FILL_CHARACTER '\0'
#define FILL_SCORE -1E10f
/** this value is used to allocate enough memory to store the starting points */
#define MAXIMUM_NUMBER_STARTING_POINTS (NUMBER_SEQUENCES*NUMBER_TARGETS*1000)
/**** Other definitions ****/
/** bit mask to get the negative value of a float, or to keep it negative */
#define SIGN_BIT_MASK 0x80000000
#define MAX_LINE_LENGTH 500
#define AFFINE_GAP_INIT -1E10f
/* Scorings matrix for each thread block */
typedef struct {
float value[SHARED_X][SHARED_Y];
} LocalMatrix;
/* Scorings matrix for each sequence alignment */
typedef struct {
LocalMatrix matrix[XdivSHARED_X][YdivSHARED_Y];
} ScoringsMatrix;
/* Scorings matrix for entire application */
typedef struct {
ScoringsMatrix metaMatrix[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalMatrix;
typedef struct {
float value[XdivSHARED_X][YdivSHARED_Y];
} BlockMaxima;
typedef struct {
BlockMaxima blockMaxima[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalMaxima;
typedef struct {
unsigned char value[SHARED_X][SHARED_Y];
} LocalDirection;
typedef struct {
LocalDirection localDirection[XdivSHARED_X][YdivSHARED_Y];
} Direction;
typedef struct {
Direction direction[NUMBER_SEQUENCES][NUMBER_TARGETS];
} GlobalDirection;
typedef struct {
unsigned int sequence;
unsigned int target;
unsigned int blockX;
unsigned int blockY;
unsigned int valueX;
unsigned int valueY;
float score;
float maxScore;
float posScore;
} StartingPoint;
typedef struct {
StartingPoint startingPoint[MAXIMUM_NUMBER_STARTING_POINTS];
} StartingPoints;
typedef struct {
float value[NUMBER_TARGETS];
} TargetMaxima;
typedef struct {
float value[NUMBER_SEQUENCES];
} SequenceMaxima;
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <builtin_types.h>
#include <time.h>
/**
* The calculateScore function checks the alignment per block. It calculates the score for each cell in
* shared memory.
* @matrix The scorings matrix
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
extern "C"
__global__ void calculateScore(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, char *sequences, char *targets, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection);
extern "C"
__global__ void calculateScoreAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j, unsigned int x, unsigned int y, unsigned int numberOfBlocks, char *sequences, char *targets, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection);
extern "C"
__global__ void traceback(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore);
extern "C"
__global__ void tracebackAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore);
/**
* The calculateScore function checks the alignment per block. It calculates the score for each cell in
* shared memory
* @matrix The scorings matrix
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
__global__ void calculateScore(
GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks,
char *sequences, char *targets,
GlobalMaxima *globalMaxima,
GlobalDirection *globalDirection
) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
/**
* shared memory block for storing the maximum value of each neighboring cell.
* Careful: the s_maxima[SHARED_X][SHARED_Y] does not contain the maximum value
* after the calculation loop! This value is determined at the end of this
* function.
*/
__shared__ float s_maxima[SHARED_X][SHARED_Y];
// calculate indices:
//unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_SEQUENCES);
unsigned int blockx = x - blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int blocky = y + blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;///numberOfBlocks;
unsigned char direction = NO_DIRECTION;
// indices of the current characters in both sequences.
int seqIndex1 = tIDx + bIDx * X + blockx * SHARED_X;
int seqIndex2 = tIDy + bIDy * Y + blocky * SHARED_Y;
/* the next block is to get the maximum value from surrounding blocks. This maximum values is compared to the
* first element in the shared score matrix s_matrix.
*/
float maxPrev = 0.0f;
if (!tIDx && !tIDy) {
if (blockx && blocky) {
maxPrev = max(max(globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky-1], globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky]), globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1]);
}
else if (blockx) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky];
}
else if (blocky) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1];
}
}
// local scorings variables:
float currentScore, ulS, lS, uS;
float innerScore = 0.0f;
/**
* tXM1 and tYM1 are to store the current value of the thread Index. tIDx and tIDy are
* both increased with 1 later on.
*/
unsigned int tXM1 = tIDx;
unsigned int tYM1 = tIDy;
// shared location for the parts of the 2 sequences, for faster retrieval later on:
__shared__ char s_seq1[SHARED_X];
__shared__ char s_seq2[SHARED_Y];
// copy sequence data to shared memory (shared is much faster than global)
if (!tIDy)
s_seq1[tIDx] = sequences[seqIndex1];
if (!tIDx)
s_seq2[tIDy] = targets[seqIndex2];
// set both matrices to zero
s_matrix[tIDx][tIDy] = 0.0f;
s_maxima[tIDx][tIDy] = 0.0f;
if (tIDx == SHARED_X-1 && ! tIDy)
s_matrix[SHARED_X][0] = 0.0f;
if (tIDy == SHARED_Y-1 && ! tIDx)
s_matrix[0][SHARED_Y] = 0.0f;
/**** sync barrier ****/
s_matrix[tIDx][tIDy] = 0.0f;
__syncthreads();
// initialize outer parts of the matrix:
if (!tIDx || !tIDy) {
if (tIDx == SHARED_X-1)
s_matrix[tIDx+1][tIDy] = 0.0f;
if (tIDy == SHARED_Y-1)
s_matrix[tIDx][tIDy+1] = 0.0f;
if (blockx && !tIDx) {
s_matrix[0][tIDy+1] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
}
if (blocky && !tIDy) {
s_matrix[tIDx+1][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
}
if (blockx && blocky && !tIDx && !tIDy){
s_matrix[0][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
}
}
// set inner score (aka sequence match/mismatch score):
char charS1 = s_seq1[tIDx];
char charS2 = s_seq2[tIDy];
innerScore = charS1 == FILL_CHARACTER || charS2 == FILL_CHARACTER ? FILL_SCORE : scoringsMatrix[charS1-characterOffset][charS2-characterOffset];
// transpose the index
++tIDx;
++tIDy;
// set shared matrix to zero (starting point!)
s_matrix[tIDx][tIDy] = 0.0f;
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
currentScore = 0.0f;
for (int i=0; i < DIAGONAL; ++i) {
if (i == tXM1+ tYM1) {
// calculate only when there are two valid characters
// this is necessary when the two sequences are not of equal length
// this is the SW-scoring of the cell:
ulS = s_matrix[tXM1][tYM1] + innerScore;
lS = s_matrix[tXM1][tIDy] + gapScore;
uS = s_matrix[tIDx][tYM1] + gapScore;
if (currentScore < lS) { // score comes from left
currentScore = lS;
direction = LEFT_DIRECTION;
}
if (currentScore < uS) { // score comes from above
currentScore = uS;
direction = UPPER_DIRECTION;
}
if (currentScore < ulS) { // score comes from upper left
currentScore = ulS;
direction = UPPER_LEFT_DIRECTION;
}
s_matrix[tIDx][tIDy] = innerScore == FILL_SCORE ? 0.0 : currentScore; // copy score to matrix
}
else if (i-1 == tXM1 + tYM1 ){
// use this to find max
if (i==1) {
s_maxima[0][0] = max(maxPrev, currentScore);
}
else if (!tXM1 && tYM1) {
s_maxima[0][tYM1] = max(s_maxima[0][tYM1-1], currentScore);
}
else if (!tYM1 && tXM1) {
s_maxima[tXM1][0] = max(s_maxima[tXM1-1][0], currentScore);
}
else if (tXM1 && tYM1 ){
s_maxima[tXM1][tYM1] = max(s_maxima[tXM1-1][tYM1], max(s_maxima[tXM1][tYM1-1], currentScore));
}
}
// wait until all threads have calculated their new score
/**** sync barrier ****/
__syncthreads();
}
// copy end score to the scorings matrix:
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix[tIDx][tIDy];
(*globalDirection).direction[bIDx][bIDy].localDirection[blockx][blocky].value[tXM1][tYM1] = direction;
if (tIDx==SHARED_X && tIDy==SHARED_Y)
globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky] = max(currentScore, max(s_maxima[SHARED_X-2][SHARED_Y-1], s_maxima[SHARED_X-1][SHARED_Y-2]));
// wait until all threads have copied their score:
/**** sync barrier ****/
__syncthreads();
}
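/*
 * Reading the loop above as a wavefront: the cell owned by thread (threadIdx.x, threadIdx.y)
 * is computed in iteration i == threadIdx.x + threadIdx.y, so all cells on one anti-diagonal
 * of the block are filled in parallel, and iteration i+1 folds the running maximum of
 * diagonal i into s_maxima. For example, with SHARED_X == SHARED_Y == 8 the loop runs
 * DIAGONAL == 16 iterations: cell (0,0) is computed at i == 0, cells (2,3) and (3,2) at
 * i == 5, and the last cell (7,7) at i == 14, whose maximum is folded at i == 15.
 * (Illustrative numbers only; the real SHARED_X/SHARED_Y are substituted at build time.)
 */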
/**
 * The calculateScoreAffineGap function checks the alignment per block. It calculates the score for each cell in
* shared memory using the affine gap penalty score
* @matrix The scorings matrix
* @matrix_i The scorings matrix, gaps in X
* @matrix_j The scorings matrix, gaps in Y
* @x The start x block position in the alignment to be calculated
* @y The start y block position in the alignment to be calculated
* @numberOfBlocks The amount of blocks within an alignment which can be calculated
* @seq1 The upper sequence in the alignment
* @seq2 The left sequence in the alignment
*/
__global__ void calculateScoreAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j,
unsigned int x, unsigned int y, unsigned int numberOfBlocks,
char *sequences, char *targets,
GlobalMaxima *globalMaxima,
GlobalDirection *globalDirection
) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_i[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_j[SHARED_X+1][SHARED_Y+1];
/**
* shared memory block for storing the maximum value of each neighboring cell.
* Careful: the s_maxima[SHARED_X][SHARED_Y] does not contain the maximum value
* after the calculation loop! This value is determined at the end of this
* function.
*/
__shared__ float s_maxima[SHARED_X][SHARED_Y];
// calculate indices:
//unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_SEQUENCES);
unsigned int blockx = x - blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int blocky = y + blockIdx.y/NUMBER_TARGETS;//yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;///numberOfBlocks;
unsigned char direction = NO_DIRECTION;
unsigned char direction_i = NO_DIRECTION;
unsigned char direction_j = NO_DIRECTION;
// indices of the current characters in both sequences.
int seqIndex1 = tIDx + bIDx * X + blockx * SHARED_X;
int seqIndex2 = tIDy + bIDy * Y + blocky * SHARED_Y;
/* the next block is to get the maximum value from surrounding blocks. This maximum values is compared to the
* first element in the shared score matrix s_matrix.
*/
float maxPrev = 0.0f;
if (!tIDx && !tIDy) {
if (blockx && blocky) {
maxPrev = max(max(globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky-1], globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky]), globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1]);
}
else if (blockx) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx-1][blocky];
}
else if (blocky) {
maxPrev = globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky-1];
}
}
// local scorings variables:
float currentScore,currentScore_i, currentScore_j, m_M, m_I, m_J;
float innerScore = 0.0f;
/**
* tXM1 and tYM1 are to store the current value of the thread Index. tIDx and tIDy are
* both increased with 1 later on.
*/
unsigned int tXM1 = tIDx;
unsigned int tYM1 = tIDy;
// shared location for the parts of the 2 sequences, for faster retrieval later on:
__shared__ char s_seq1[SHARED_X];
__shared__ char s_seq2[SHARED_Y];
// copy sequence data to shared memory (shared is much faster than global)
if (!tIDy)
s_seq1[tIDx] = sequences[seqIndex1];
if (!tIDx)
s_seq2[tIDy] = targets[seqIndex2];
// set matrices to zero
s_matrix[tIDx][tIDy] = 0.0f;
s_matrix_i[tIDx][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy] = AFFINE_GAP_INIT;
s_maxima[tIDx][tIDy] = 0.0f;
if (tIDx == SHARED_X-1 && ! tIDy) {
s_matrix[SHARED_X][0] = 0.0f;
s_matrix_i[SHARED_X][0] = AFFINE_GAP_INIT;
s_matrix_j[SHARED_X][0] = AFFINE_GAP_INIT;
}
if (tIDy == SHARED_Y-1 && ! tIDx) {
s_matrix[0][SHARED_Y] = 0.0f;
s_matrix_i[0][SHARED_Y] = AFFINE_GAP_INIT;
s_matrix_j[0][SHARED_Y] = AFFINE_GAP_INIT;
}
/**** sync barrier ****/
__syncthreads();
// initialize outer parts of the matrix:
if (!tIDx || !tIDy) {
if (tIDx == SHARED_X-1) {
s_matrix[tIDx+1][tIDy] = 0.0f;
s_matrix_i[tIDx+1][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx+1][tIDy] = AFFINE_GAP_INIT;
}
if (tIDy == SHARED_Y-1) {
s_matrix[tIDx][tIDy+1] = 0.0f;
s_matrix_i[tIDx][tIDy+1] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy+1] = AFFINE_GAP_INIT;
}
if (blockx && !tIDx) {
s_matrix[0][tIDy+1] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
s_matrix_i[0][tIDy+1] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
s_matrix_j[0][tIDy+1] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
}
if (blocky && !tIDy) {
s_matrix[tIDx+1][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
s_matrix_i[tIDx+1][0] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
s_matrix_j[tIDx+1][0] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
}
if (blockx && blocky && !tIDx && !tIDy){
s_matrix[0][0] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
s_matrix_i[0][0] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
s_matrix_j[0][0] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
}
}
// set inner score (aka sequence match/mismatch score):
char charS1 = s_seq1[tIDx];
char charS2 = s_seq2[tIDy];
innerScore = charS1 == FILL_CHARACTER || charS2 == FILL_CHARACTER ? FILL_SCORE : scoringsMatrix[charS1-characterOffset][charS2-characterOffset];
// transpose the index
++tIDx;
++tIDy;
// set shared matrix to zero (starting point!)
s_matrix[tIDx][tIDy] = 0.0f;
s_matrix_i[tIDx][tIDy] = AFFINE_GAP_INIT;
s_matrix_j[tIDx][tIDy] = AFFINE_GAP_INIT;
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
currentScore = 0.0f;
for (int i=0; i < DIAGONAL; ++i) {
if (i == tXM1+ tYM1) {
// calculate only when there are two valid characters
// this is necessary when the two sequences are not of equal length
// this is the SW-scoring of the cell:
m_M = s_matrix[tXM1][tYM1]+innerScore;
m_I = s_matrix_i[tXM1][tYM1]+innerScore;
m_J = s_matrix_j[tXM1][tYM1]+innerScore;
if (currentScore < m_I) { // score comes from I matrix (gap in x)
currentScore = m_I;
direction = A_DIRECTION | MAIN_MATRIX;
}
if (currentScore < m_J) { // score comes from J matrix (gap in y)
currentScore = m_J;
direction = A_DIRECTION | MAIN_MATRIX;
}
if (currentScore < m_M) { // score comes from m matrix (match)
currentScore = m_M;
direction = A_DIRECTION | MAIN_MATRIX;
}
s_matrix[tIDx][tIDy] = innerScore == FILL_SCORE ? 0.0 : currentScore; // copy score to matrix
// now do I matrix:
currentScore_i = AFFINE_GAP_INIT;
m_M = gapScore + gapExtension + s_matrix[tIDx][tYM1];
m_I = gapExtension + s_matrix_i[tIDx][tYM1];
if (currentScore_i < m_I) { // score comes from I matrix (gap in x)
currentScore_i = m_I;
direction_i = B_DIRECTION | I_MATRIX;
}
if (currentScore_i < m_M) { // score comes from m matrix (match)
currentScore_i = m_M;
direction_i= B_DIRECTION | I_MATRIX;
}
s_matrix_i[tIDx][tIDy] = currentScore_i < 0 ? AFFINE_GAP_INIT : currentScore_i; // copy score to matrix
// now do J matrix:
currentScore_j = AFFINE_GAP_INIT;
m_M = gapScore + gapExtension + s_matrix[tXM1][tIDy];
m_J = gapExtension + s_matrix_j[tXM1][tIDy];
if (currentScore_j < m_J) { // score comes from J matrix (gap in y)
currentScore_j = m_J;
direction_j = C_DIRECTION | J_MATRIX;
}
if (currentScore_j < m_M) { // score comes from m matrix (match)
currentScore_j = m_M;
direction_j = C_DIRECTION | J_MATRIX;
}
s_matrix_j[tIDx][tIDy] = currentScore_j < 0 ? AFFINE_GAP_INIT : currentScore_j; // copy score to matrix
currentScore = fmax(currentScore,fmax(currentScore_i,currentScore_j));
if (currentScore > 0) {
if (currentScore == s_matrix[tIDx][tIDy]) {// direction from main
direction = direction;
}
else if(currentScore == s_matrix_i[tIDx][tIDy]) {// direction from I
direction = direction_i;
}
else if(currentScore == s_matrix_j[tIDx][tIDy]){ // direction from J
direction = direction_j;
}
}
}
else if (i-1 == tXM1 + tYM1 ){
// use this to find max
if (i==1) {
s_maxima[0][0] = max(maxPrev, currentScore);
}
else if (!tXM1 && tYM1) {
s_maxima[0][tYM1] = max(s_maxima[0][tYM1-1], currentScore);
}
else if (!tYM1 && tXM1) {
s_maxima[tXM1][0] = max(s_maxima[tXM1-1][0], currentScore);
}
else if (tXM1 && tYM1 ){
s_maxima[tXM1][tYM1] = max(s_maxima[tXM1-1][tYM1], max(s_maxima[tXM1][tYM1-1], currentScore));
}
}
// wait until all threads have calculated their new score
/**** sync barrier ****/
__syncthreads();
}
// copy end score to the scorings matrix:
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix[tIDx][tIDy];
(*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix_i[tIDx][tIDy];
(*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tXM1][tYM1] = s_matrix_j[tIDx][tIDy];
(*globalDirection).direction[bIDx][bIDy].localDirection[blockx][blocky].value[tXM1][tYM1] = direction;
if (tIDx==SHARED_X && tIDy==SHARED_Y)
globalMaxima->blockMaxima[bIDx][bIDy].value[blockx][blocky] = max(currentScore, max(s_maxima[SHARED_X-2][SHARED_Y-1], s_maxima[SHARED_X-1][SHARED_Y-2]));
// wait until all threads have copied their score:
/**** sync barrier ****/
__syncthreads();
}
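/*
 * The update above appears to follow a Gotoh-style affine-gap recurrence, written here with
 * the kernel's own names (gapScore = gap-open penalty, gapExtension = per-cell extension
 * penalty, s = substitution score of the two characters at this cell):
 *
 *   M[x][y] = max(0, M[x-1][y-1] + s, I[x-1][y-1] + s, J[x-1][y-1] + s)
 *   I[x][y] = max(M[x][y-1] + gapScore + gapExtension, I[x][y-1] + gapExtension)
 *   J[x][y] = max(M[x-1][y] + gapScore + gapExtension, J[x-1][y] + gapExtension)
 *
 * where I and J are clamped back to AFFINE_GAP_INIT instead of 0 when they fall below zero,
 * and the traceback direction stored for the cell is taken from whichever of the three
 * matrices holds the cell's overall maximum.
 */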
__global__ void traceback(GlobalMatrix *matrix, unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore) {
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
/**
* shared memory for storing the maximum value of this alignment.
*/
__shared__ float s_maxima[1];
__shared__ float s_maxPossibleScore[1];
// calculate indices:
unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_TARGETS);
unsigned int blockx = x - yDIVnumSeq;
unsigned int blocky = y + yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
float value;
if (!tIDx && !tIDy) {
s_maxima[0] = globalMaxima->blockMaxima[bIDx][bIDy].value[XdivSHARED_X-1][YdivSHARED_Y-1];
s_maxPossibleScore[0] = maxPossibleScore[bIDy*NUMBER_SEQUENCES+bIDx];
}
__syncthreads();
if (s_maxima[0]>= MINIMUM_SCORE) { // if the maximum score is below threshold, there is nothing to do
s_matrix[tIDx][tIDy] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
unsigned char direction = globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
for (int i=DIAGONAL-1; i >= 0; --i) {
if ((i == tIDx + tIDy) && direction == UPPER_LEFT_DIRECTION && s_matrix[tIDx][tIDy] >= LOWER_LIMIT_SCORE * s_maxima[0] && s_matrix[tIDx][tIDy] >= s_maxPossibleScore[0]) {
// found starting point!
// reserve index:
unsigned int index = atomicAdd(indexIncrement, 1);
// now copy this to host:
StartingPoint *start = &(startingPoints->startingPoint[index]);
start->sequence = bIDx;
start->target = bIDy;
start->blockX = blockx;
start->blockY = blocky;
start->valueX = tIDx;
start->valueY = tIDy;
start->score = s_matrix[tIDx][tIDy];
start->maxScore = s_maxima[0];
start->posScore = s_maxPossibleScore[0];
// startingPoints->startingPoint[index] = start;
// mark this value:
s_matrix[tIDx][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(s_matrix[tIDx][tIDy]));
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == UPPER_LEFT_DIRECTION) {
if (tIDx && tIDy){
value = s_matrix[tIDx-1][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && tIDy && blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && !tIDy && blockx && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (tIDx && !tIDy && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == UPPER_DIRECTION) {
if (!tIDy) {
if (blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx][tIDy-1];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
if ((i == tIDx + tIDy) && s_matrix[tIDx][tIDy] < 0 && direction == LEFT_DIRECTION) {
if (!tIDx){
if (blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx-1][tIDy];
if (value == 0.0f)
direction = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
__syncthreads();
}
// copy end score to the scorings matrix:
if (s_matrix[tIDx][tIDy] < 0) {
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
/**** sync barrier ****/
__syncthreads();
}
}
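/*
 * The starting points found above are collected with a global "ticket counter": every thread
 * that detects a starting point reserves a unique slot in startingPoints->startingPoint[] by
 * atomically incrementing *indexIncrement and using the returned old value as its index.
 * A minimal generic sketch of that reservation pattern (the helper name is illustrative and
 * not used elsewhere in this file; a defensive variant would also compare the returned index
 * against MAXIMUM_NUMBER_STARTING_POINTS before writing):
 */
static __device__ __forceinline__ unsigned int reserveOutputSlot(unsigned int *counter) {
    // atomicAdd returns the value *before* the increment, so each caller gets a unique index
    return atomicAdd(counter, 1u);
}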
__device__ unsigned char tracebackStepLeftUp(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (tIDx && tIDy){
value = s_matrix[tIDx-1][tIDy-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
s_matrix[tIDx-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && tIDy && blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (!tIDx && !tIDy && blockx && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky-1].value[SHARED_X-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
else if (tIDx && !tIDy && blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1];
if (value == 0.0f)
dir = STOP_DIRECTION;
else
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx-1][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__device__ unsigned char tracebackStepUp(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (!tIDy) {
if (blocky) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1];
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky-1].value[tIDx][SHARED_Y-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx][tIDy-1];
s_matrix[tIDx][tIDy-1] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__device__ unsigned char tracebackStepLeft(unsigned int blockx, unsigned int blocky, float s_matrix[][SHARED_Y+1], GlobalMatrix *matrix, unsigned char direction){
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
unsigned char dir = direction;
float value;
if (!tIDx){
if (blockx) {
value = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy];
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx-1][blocky].value[SHARED_X-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
}
else {
value = s_matrix[tIDx-1][tIDy];
s_matrix[tIDx-1][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(value));
}
return dir;
}
__global__ void tracebackAffineGap(GlobalMatrix *matrix, GlobalMatrix *matrix_i, GlobalMatrix *matrix_j,unsigned int x, unsigned int y, unsigned int numberOfBlocks, GlobalMaxima *globalMaxima, GlobalDirection *globalDirection, GlobalDirection *globalDirectionZeroCopy, unsigned int *indexIncrement, StartingPoints *startingPoints, float *maxPossibleScore){
/**
* shared memory block for calculations. It requires
* extra (+1 in both directions) space to hold
* Neighboring cells
*/
__shared__ float s_matrix[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_i[SHARED_X+1][SHARED_Y+1];
__shared__ float s_matrix_j[SHARED_X+1][SHARED_Y+1];
/**
* shared memory for storing the maximum value of this alignment.
*/
__shared__ float s_maxima[1];
__shared__ float s_maxPossibleScore[1];
// calculate indices:
unsigned int yDIVnumSeq = (blockIdx.y/NUMBER_TARGETS);
unsigned int blockx = x - yDIVnumSeq;
unsigned int blocky = y + yDIVnumSeq;
unsigned int tIDx = threadIdx.x;
unsigned int tIDy = threadIdx.y;
unsigned int bIDx = blockIdx.x;
unsigned int bIDy = blockIdx.y%NUMBER_TARGETS;
if (!tIDx && !tIDy) {
s_maxima[0] = globalMaxima->blockMaxima[bIDx][bIDy].value[XdivSHARED_X-1][YdivSHARED_Y-1];
s_maxPossibleScore[0] = maxPossibleScore[bIDy*NUMBER_SEQUENCES+bIDx];
}
__syncthreads();
if (s_maxima[0]>= MINIMUM_SCORE) { // if the maximum score is below threshold, there is nothing to do
unsigned char direction = DIRECTION_MASK & globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
unsigned char matrix_source = MATRIX_MASK & globalDirection->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy];
s_matrix[tIDx][tIDy] = (*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
s_matrix_i[tIDx][tIDy] = (*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
s_matrix_j[tIDx][tIDy] = (*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy];
// wait until all elements have been copied to the shared memory block
/**** sync barrier ****/
__syncthreads();
for (int i=DIAGONAL-1; i >= 0; --i) {
if ((i == tIDx + tIDy) && matrix_source == MAIN_MATRIX && s_matrix[tIDx][tIDy] >= LOWER_LIMIT_SCORE * s_maxima[0] && s_matrix[tIDx][tIDy] >= s_maxPossibleScore[0]) {
// found starting point!
// reserve index:
unsigned int index = atomicAdd(indexIncrement, 1);
// now copy this to host:
StartingPoint *start = &(startingPoints->startingPoint[index]);
start->sequence = bIDx;
start->target = bIDy;
start->blockX = blockx;
start->blockY = blocky;
start->valueX = tIDx;
start->valueY = tIDy;
start->score = s_matrix[tIDx][tIDy];
start->maxScore = s_maxima[0];
start->posScore = s_maxPossibleScore[0];
// startingPoints->startingPoint[index] = start;
// mark this value:
s_matrix[tIDx][tIDy] = __int_as_float(SIGN_BIT_MASK | __float_as_int(s_matrix[tIDx][tIDy]));
}
__syncthreads();
if ((i == tIDx + tIDy) && (
(s_matrix[tIDx][tIDy] < 0 && matrix_source == MAIN_MATRIX) ||
(s_matrix_i[tIDx][tIDy] < 0 && s_matrix_i[tIDx][tIDy] > AFFINE_GAP_INIT && matrix_source == I_MATRIX) ||
(s_matrix_j[tIDx][tIDy] < 0 && s_matrix_j[tIDx][tIDy] > AFFINE_GAP_INIT && matrix_source == J_MATRIX)
)) {
// check which matrix to go to:
switch (direction) {
case A_DIRECTION : // M
direction = tracebackStepLeftUp(blockx, blocky, s_matrix, matrix, direction);
break;
case B_DIRECTION : // I
direction = tracebackStepUp(blockx, blocky, s_matrix_i, matrix_i, direction);
break;
case C_DIRECTION : // J
direction = tracebackStepLeft(blockx, blocky, s_matrix_j, matrix_j, direction);
break;
}
}
__syncthreads();
}
if (matrix_source == MAIN_MATRIX) {
(*matrix).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
else if (matrix_source == I_MATRIX) {
(*matrix_i).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix_i[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
else if (matrix_source == J_MATRIX) {
(*matrix_j).metaMatrix[bIDx][bIDy].matrix[blockx][blocky].value[tIDx][tIDy] = s_matrix_j[tIDx][tIDy];
globalDirectionZeroCopy->direction[bIDx][bIDy].localDirection[blockx][blocky].value[tIDx][tIDy] = direction;
}
/**** sync barrier ****/
__syncthreads();
}
}
|
82eb582cf2d895bcfa07f92b828d86a9809c29e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + kW, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
template<int KERNEL_WIDTH, typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + KERNEL_WIDTH, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( cuda_VolumetricAveragePooling_updateOutput_fixedKW<KW, scalar_t, accreal>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \
break
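/*
 * The case/break pair baked into this macro suggests it is expanded inside a switch on the
 * run-time kernel width kW, so common widths get the fully unrolled _fixedKW kernel while
 * everything else falls back to the generic kernel that receives kW as an argument. A hedged
 * sketch of that dispatch (the actual host code lives in generic/VolumetricAveragePooling.cu,
 * included at the bottom of this file, and may differ in detail):
 *
 *   switch (kW) {
 *     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2);
 *     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3);
 *     LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4);
 *     // ... more widths ...
 *     default:
 *       // launch cuda_VolumetricAveragePooling_updateOutput, passing kW at run time
 *       break;
 *   }
 */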
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
Acctype normFactor, int offsetZ)
{
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.getSize(1); // input frame/time
int slice = (blockIdx.z + offsetZ) / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3))
{
Acctype sum = 0.0;
Dtype *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame)
{
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow)
{
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol)
{
sum += gOut[colOffset];
++colOffset;
}
rowOffset += gradOutput.getSize(3);
}
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<Acctype, Dtype>::to(sum * normFactor);
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
}
}
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
gradInput[slice][iFrame][iRow][iCol] = val;
}
}
}
}
}
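/*
 * Worked example of the padded-window arithmetic shared by the updateOutput and the
 * non-stride-1 updateGradInput kernels above: with kT = kH = kW = 3, dT = dH = dW = 2 and
 * padT = padH = padW = 1, the output cell (oFrame, oRow, oCol) = (0, 0, 0) gets
 * tstart = hstart = wstart = 0*2 - 1 = -1 and tend = hend = wend = -1 + 3 = 2, so
 * pool_size = 3*3*3 = 27, while the window clamped to the real input is [0,2)^3, i.e. 8 cells
 * (assuming the input extent is at least 2 in every dimension). With count_include_pad the
 * sum is divided by 27, otherwise by 8.
 */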
#include "generic/VolumetricAveragePooling.cu"
#include "THHGenerateFloatTypes.h"
| 82eb582cf2d895bcfa07f92b828d86a9809c29e9.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + kW, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
// Inner-most loop size (kW) passed as template parameter for
// performance reasons.
//
template<int KERNEL_WIDTH, typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW(
THCDeviceTensor<Dtype, 4> input,
THCDeviceTensor<Dtype, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oCol < output.getSize(3))
{
Acctype sum = 0.0;
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, input.getSize(1) + padT);
int hend = min(hstart + kH, input.getSize(2) + padH);
int wend = min(wstart + KERNEL_WIDTH, input.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, input.getSize(1));
hend = min(hend, input.getSize(2));
wend = min(wend, input.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
int ti, hi, wi;
for (ti = tstart; ti < tend; ++ti)
{
for (hi = hstart; hi < hend; ++hi)
{
for (wi = wstart; wi < wend; ++wi)
{
Dtype val = input[slice][ti][hi][wi];
sum += val;
}
}
}
output[slice][oFrame][oRow][oCol] = ScalarConvert<Acctype, Dtype>::to(sum / divide_factor);
}
}
#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
cuda_VolumetricAveragePooling_updateOutput_fixedKW<KW, scalar_t, accreal> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \
break
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
Acctype normFactor, int offsetZ)
{
int iCol = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.getSize(1); // input frame/time
int slice = (blockIdx.z + offsetZ) / gradInput.getSize(1); // input slice/feature
// guard against over-tiled threads
if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3))
{
Acctype sum = 0.0;
Dtype *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)]
[max(0, iRow - kH + 1)][max(0, iCol - kW + 1)];
int frameOffset = 0;
for (int oFrame = max(0, iFrame - kT + 1);
oFrame < min(iFrame + 1, gradOutput.getSize(1));
++oFrame)
{
int rowOffset = frameOffset;
for (int oRow = max(0, iRow - kH + 1);
oRow < min(iRow + 1, gradOutput.getSize(2));
++oRow)
{
int colOffset = rowOffset;
for (int oCol = max(0, iCol - kW + 1);
oCol < min(iCol + 1, gradOutput.getSize(3));
++oCol)
{
sum += gOut[colOffset];
++colOffset;
}
rowOffset += gradOutput.getSize(3);
}
frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3);
}
gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<Acctype, Dtype>::to(sum * normFactor);
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val);
}
}
}
}
}
template <typename Dtype, typename Acctype>
__global__ void cuda_VolumetricAveragePooling_updateGradInput(
THCDeviceTensor<Dtype, 4> gradOutput,
THCDeviceTensor<Dtype, 4> gradInput,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW,
bool count_include_pad, int offsetZ)
{
int oCol = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature
// guard against over-tiled threads
if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3))
{
int tstart = oFrame * dT - padT;
int hstart = oRow * dH - padH;
int wstart = oCol * dW - padW;
int tend = min(tstart + kT, gradInput.getSize(1) + padT);
int hend = min(hstart + kH, gradInput.getSize(2) + padH);
int wend = min(wstart + kW, gradInput.getSize(3) + padW);
int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = max(tstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
tend = min(tend, gradInput.getSize(1));
hend = min(hend, gradInput.getSize(2));
wend = min(wend, gradInput.getSize(3));
Acctype divide_factor;
if (count_include_pad)
divide_factor = static_cast<Acctype>(pool_size);
else
divide_factor = static_cast<Acctype>((tend - tstart) * (hend - hstart) * (wend - wstart));
Dtype val = ScalarConvert<Acctype, Dtype>::to(
ScalarConvert<Dtype, Acctype>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor);
for (int iFrame = tstart; iFrame < tend; ++iFrame)
{
for (int iRow = hstart; iRow < hend; ++iRow)
{
for (int iCol = wstart; iCol < wend; ++iCol)
{
gradInput[slice][iFrame][iRow][iCol] = val;
}
}
}
}
}
#include "generic/VolumetricAveragePooling.cu"
#include "THCGenerateFloatTypes.h"
|
1a1da708407408fffaed6b0566f6ac125289c217.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* random_number_generator_kernels.cu
*
* Created: 2015-05-18, Modified: 2015-07-25
*
*/
// Headers includes:
#include <cuda/kernels.cuh>
#include <cuda/Cuda_Prototypes_Macros.h>
/** Parallel random number generator.
 * Each thread uses a local seed derived from a master seed:
 * seed = masterSeed + thread_global_id
*/
__global__ void random_number_generator_kernel(int masterSeed, /* in */
int size, /* in */
float *PRNG) /* out */
{
long int a = 16807; // same as apple c++ imp
long int m = 2147483647; // 2^31 - 1
float rec = 1.0 / m;
long int theta, temp;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
long int seed = masterSeed + tid; // every thread has a different seed
while (tid < size) {
temp = seed * a; // seed = Xn , c = 0
theta = temp - m * floor(temp * rec); // equivalent to (temp mod m), i.e. ((Xn * a) mod m)
seed = theta;
PRNG[tid] = (float)theta/m; // between 1/m and 1
tid += stride;
}
}
//printf("R[%d] = %.2f\n", tid, PRNG[tid]);
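// Example usage (a minimal sketch; the buffer size, seed and launch shape below are
// illustrative assumptions, not taken from this file). The recurrence above is the
// Park-Miller / MINSTD LCG with a = 16807 and m = 2^31 - 1.
//
//   int n = 1 << 20;              // number of floats to generate (assumed)
//   float* d_prng = nullptr;
//   hipMalloc(&d_prng, n * sizeof(float));
//   hipLaunchKernelGGL(random_number_generator_kernel, dim3(256), dim3(256), 0, 0,
//                      /*masterSeed=*/12345, n, d_prng);
//   hipDeviceSynchronize();
//   hipFree(d_prng);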
| 1a1da708407408fffaed6b0566f6ac125289c217.cu | /*
* random_number_generator_kernels.cu
*
* Created: 2015-05-18, Modified: 2015-07-25
*
*/
// Headers includes:
#include <cuda/kernels.cuh>
#include <cuda/Cuda_Prototypes_Macros.h>
/** Parallel random number generator.
 * Each thread uses a local seed derived from a master seed:
 * seed = masterSeed + thread_global_id
*/
__global__ void random_number_generator_kernel(int masterSeed, /* in */
int size, /* in */
float *PRNG) /* out */
{
long int a = 16807; // same as apple c++ imp
long int m = 2147483647; // 2^31 − 1
float rec = 1.0 / m;
long int theta, temp;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
long int seed = masterSeed + tid; // every thread has a different seed
while (tid < size) {
temp = seed * a; // seed = Xn , c = 0
theta = temp - m * floor(temp * rec); // equivalent to (temp mod m), i.e. ((Xn * a) mod m)
seed = theta;
PRNG[tid] = (float)theta/m; // between 1/m - 1
tid += stride;
}
}
//printf("R[%d] = %.2f\n", tid, PRNG[tid]);
|
fa00ebbb4c85e45a1cb0756a092db093e1c4c8ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/atom/copy_traits_sm75.hpp>
using namespace cute;
template <class T>
__global__ void
ldsm_test_device(uint16_t* g_in, uint16_t* g_out)
{
constexpr int count = sizeof(T) / 4;
int tid = threadIdx.x;
int stride = blockDim.x;
// load input gmem -> smem
__shared__ uint32_t smem[32 * count];
for (int i = 0; i < count; ++i) {
smem[tid + (stride * i)] = reinterpret_cast<uint32_t*>(g_in)[tid + (stride * i)];
}
__syncthreads();
uint32_t reg[count];
for (int i = 0; i < count; ++i) {
reg[i] = 0;
}
// load smem -> rmem using LDSM
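// (ldmatrix loads 8x8 tiles of 16-bit elements from shared memory; the .x1/.x2/.x4
// forms load 1/2/4 such tiles, leaving each of the 32 threads of the warp with
// 1/2/4 packed 32-bit registers. cute::copy_ldsm is expected to pick the form from
// the register type T, matching count = sizeof(T) / 4 above.)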
uint128_t* smem_ptr = reinterpret_cast<uint128_t*>(smem) + tid;
T* rmem_ptr = reinterpret_cast<T*>(reg);
cute::copy_ldsm(smem_ptr, rmem_ptr);
// store output rmem -> gmem
for (int i = 0; i < count; ++i) {
reinterpret_cast<uint32_t*>(g_out)[tid + (stride * i)] = reg[i];
}
}
template <class TiledCopy, class SmemLayout>
__global__ void
ldsm_test_device_cute(uint16_t* g_in, uint16_t* g_out,
TiledCopy tiled_copy, SmemLayout smem_layout)
{
using namespace cute;
__shared__ uint16_t smem[size(smem_layout)];
auto t_g_in = make_tensor(make_gmem_ptr(g_in), smem_layout);
auto t_g_out = make_tensor(make_gmem_ptr(g_out), smem_layout);
auto t_smem = make_tensor(make_smem_ptr(smem), smem_layout);
int tid = threadIdx.x;
// Load input gmem -> smem
for (int i = tid; i < size(t_smem); i += size(tiled_copy)) {
t_smem(i) = t_g_in(i);
}
__syncthreads();
auto thr_copy = tiled_copy.get_thread_slice(tid);
auto tXsX = thr_copy.partition_S(t_smem); // (V,M,N)
auto tXgX = thr_copy.partition_D(t_g_out); // (V,M,N)
auto tXrX = make_tensor<uint16_t>(shape(tXgX)); // (V,M,N)
clear(tXrX); // Just to make sure
/*
if (thread0()) {
print("tXsX: " ); print(tXsX.layout()); print("\n");
print("tXgX: " ); print(tXgX.layout()); print("\n");
print("tXrX: " ); print(tXrX.layout()); print("\n");
}
*/
// Copy smem -> rmem via tiled_copy (LDSM, LDS)
copy(tiled_copy, tXsX, tXrX);
// Output rmem -> gmem
copy(tXrX, tXgX);
}
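// In the tests below, make_tiled_copy(atom, ThrLayout, ValLayout) builds a TiledCopy in
// which ThrLayout describes how the participating threads tile the data and ValLayout
// how many values each thread handles; size(tiled_copy) gives the thread count and is
// used as the launch block size.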
TEST(SM80_CuTe_Ampere, Ldsm)
{
constexpr int count = 1024;
thrust::host_vector<uint16_t> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = uint16_t(i);
}
thrust::device_vector<uint16_t> d_in = h_in;
//
// LDSM 1x (32b)
//
{
thrust::device_vector<uint16_t> d_out(count);
hipLaunchKernelGGL(( ldsm_test_device<uint32_t>), dim3(1), dim3(32), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 32; ++i) {
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 1x ldsm_test_device SUCCESS\n");
}
//
// LDSM 2x (64b)
//
{
thrust::device_vector<uint16_t> d_out(count);
hipLaunchKernelGGL(( ldsm_test_device<uint64_t>), dim3(1), dim3(32), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 64; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 2x ldsm_test_device SUCCESS\n");
}
//
// LDSM 4x (128b)
//
{
thrust::device_vector<uint16_t> d_out(count);
hipLaunchKernelGGL(( ldsm_test_device<uint128_t>), dim3(1), dim3(32), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 128; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 4x ldsm_test_device SUCCESS\n");
}
//
// CuTe LDSM
//
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i] , h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x2_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_2,_1>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x2_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x4_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_4,_1>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x4_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x8_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_8,_1>>{});
hipLaunchKernelGGL(( ldsm_test_device_cute), dim3(1), dim3(int(size(tiled_copy))), 0, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x8_LDSM_T SUCCESS\n");
}
CUTLASS_TRACE_HOST("PASS");
}
| fa00ebbb4c85e45a1cb0756a092db093e1c4c8ba.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
#include <cute/atom/copy_traits_sm75.hpp>
using namespace cute;
template <class T>
__global__ void
ldsm_test_device(uint16_t* g_in, uint16_t* g_out)
{
constexpr int count = sizeof(T) / 4;
int tid = threadIdx.x;
int stride = blockDim.x;
// load input gmem -> smem
__shared__ uint32_t smem[32 * count];
for (int i = 0; i < count; ++i) {
smem[tid + (stride * i)] = reinterpret_cast<uint32_t*>(g_in)[tid + (stride * i)];
}
__syncthreads();
uint32_t reg[count];
for (int i = 0; i < count; ++i) {
reg[i] = 0;
}
// load smem -> rmem using LDSM
uint128_t* smem_ptr = reinterpret_cast<uint128_t*>(smem) + tid;
T* rmem_ptr = reinterpret_cast<T*>(reg);
cute::copy_ldsm(smem_ptr, rmem_ptr);
// store output rmem -> gmem
for (int i = 0; i < count; ++i) {
reinterpret_cast<uint32_t*>(g_out)[tid + (stride * i)] = reg[i];
}
}
template <class TiledCopy, class SmemLayout>
__global__ void
ldsm_test_device_cute(uint16_t* g_in, uint16_t* g_out,
TiledCopy tiled_copy, SmemLayout smem_layout)
{
using namespace cute;
__shared__ uint16_t smem[size(smem_layout)];
auto t_g_in = make_tensor(make_gmem_ptr(g_in), smem_layout);
auto t_g_out = make_tensor(make_gmem_ptr(g_out), smem_layout);
auto t_smem = make_tensor(make_smem_ptr(smem), smem_layout);
int tid = threadIdx.x;
// Load input gmem -> smem
for (int i = tid; i < size(t_smem); i += size(tiled_copy)) {
t_smem(i) = t_g_in(i);
}
__syncthreads();
auto thr_copy = tiled_copy.get_thread_slice(tid);
auto tXsX = thr_copy.partition_S(t_smem); // (V,M,N)
auto tXgX = thr_copy.partition_D(t_g_out); // (V,M,N)
auto tXrX = make_tensor<uint16_t>(shape(tXgX)); // (V,M,N)
clear(tXrX); // Just to make sure
/*
if (thread0()) {
print("tXsX: " ); print(tXsX.layout()); print("\n");
print("tXgX: " ); print(tXgX.layout()); print("\n");
print("tXrX: " ); print(tXrX.layout()); print("\n");
}
*/
// Copy smem -> rmem via tiled_copy (LDSM, LDS)
copy(tiled_copy, tXsX, tXrX);
// Output rmem -> gmem
copy(tXrX, tXgX);
}
TEST(SM80_CuTe_Ampere, Ldsm)
{
constexpr int count = 1024;
thrust::host_vector<uint16_t> h_in(count);
for (int i = 0; i < count; ++i) {
h_in[i] = uint16_t(i);
}
thrust::device_vector<uint16_t> d_in = h_in;
//
// LDSM 1x (32b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint32_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 32; ++i) {
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 1x ldsm_test_device SUCCESS\n");
}
//
// LDSM 2x (64b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint64_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 64; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 2x ldsm_test_device SUCCESS\n");
}
//
// LDSM 4x (128b)
//
{
thrust::device_vector<uint16_t> d_out(count);
ldsm_test_device<uint128_t><<<1, 32>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()));
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < 128; ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("LDSM 4x ldsm_test_device SUCCESS\n");
}
//
// CuTe LDSM
//
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,Shape <_2, _4>>,
Stride< _2,Stride<_1,_64>>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_32,_1>>{},
Layout<Shape< _1,_8>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i] , h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x8 interleaved LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x1_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x1_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x2_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x2_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U32x4_LDSM_N, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U32x4_LDSM_N SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride< _1,_32>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<UniversalCopy<uint16_t>, uint16_t>{},
Layout<Shape<_16,_2>>{},
Layout<Shape< _2,_4>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 LDS.U16 SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x2_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_2,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x2_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x4_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_4,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x4_LDSM_T SUCCESS\n");
}
{
thrust::device_vector<uint16_t> d_out(count);
auto smem_layout = Layout<Shape <_32,_32>,
Stride<_32, _1>>{};
auto tiled_copy = make_tiled_copy(Copy_Atom<SM75_U16x8_LDSM_T, uint16_t>{},
Layout<Shape<_4,_8>>{},
Layout<Shape<_8,_1>>{});
ldsm_test_device_cute<<<1, int(size(tiled_copy))>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tiled_copy,
smem_layout);
thrust::host_vector<uint16_t> h_out = d_out;
for (int i = 0; i < size(smem_layout); ++i) {
//printf("%d %d\n", int(h_in[i]), int(h_out[i]));
EXPECT_EQ(h_out[i], h_in[i]);
}
CUTLASS_TRACE_HOST("CuTe 32x32 U16x8_LDSM_T SUCCESS\n");
}
CUTLASS_TRACE_HOST("PASS");
}
|
06dcb133a8c8e8244a267de181fc46a33f4b9511.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_5, const double* __restrict__ arrIn1_4, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_3, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
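// Recover the 3-D coordinates (sh2, sh1, sh0) of the row-major output shape
// (shOut_2, shOut_1, shOut_0) from the linear index ix.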
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Word8 v1 = ({ const Int64 v0 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; ; arrIn2_1[v0]; }) > 0.0;
double lv580;
double lv581;
if (v1) {
lv581 = 0.0;
lv580 = 0.0;
} else {
const double v5 = ({ const Int64 v2 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v2]; }) / (({ const Int64 v3 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v3]; }) * ({ const Int64 v4 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; ; arrIn2_3[v4]; }));
const Int64 v6 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
const double v7 = arrIn1_2[v6];
const double v8 = arrIn1_1[v6];
const double v9 = arrIn1_0[v6];
const Int64 v10 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
const double v11 = arrIn1_5[v10];
const double v12 = arrIn1_4[v10];
const double v13 = arrIn1_3[v10];
const double v14 = v11 * v7;
const double v15 = v12 * v8;
const double v16 = v13 * v9;
const double v17 = fmin(0.0, v14);
const double v18 = fmin(0.0, v15);
const double v19 = fmin(0.0, v16);
const Int64 v20 = (Int64) 0;
const Int64 v21 = (Int64) 1;
const double v55 = fmin(2.0, fmax(0.0, ({ const double v22 = 1.0 / (1.0e-36 + v7); const double v27 = ({ const Int64 v23 = v21 + sh0; const Word8 v24 = v23 >= shIn0_0 || (sh1 >= shIn0_0 || sh2 >= shIn0_0); double lv260; double lv261; double lv262; if (v24) { lv262 = 0.0; lv261 = 0.0; lv260 = 0.0; } else { const Int64 v25 = (max(v20, sh2) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, v23); lv262 = arrIn1_2[v25]; lv261 = arrIn1_1[v25]; lv260 = arrIn1_0[v25]; } ; lv262; }) * v22; ; fmin(({ const double v32 = ({ const Int64 v28 = (Int64) -1 + sh0; const Word8 v29 = v28 >= shIn0_0 || (sh1 >= shIn0_0 || sh2 >= shIn0_0); double lv310; double lv311; double lv312; if (v29) { lv312 = 0.0; lv311 = 0.0; lv310 = 0.0; } else { const Int64 v30 = (max(v20, sh2) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, v28); lv312 = arrIn1_2[v30]; lv311 = arrIn1_1[v30]; lv310 = arrIn1_0[v30]; } ; lv312; }) * v22; ; fmin(v32, 0.5 * (v32 + v27)); }), v27); })));
const double v56 = fmin(2.0, fmax(0.0, ({ const double v33 = 1.0 / (1.0e-36 + v8); const double v38 = ({ const Int64 v34 = v21 + sh1; const Word8 v35 = sh0 >= shIn0_0 || (v34 >= shIn0_0 || sh2 >= shIn0_0); double lv370; double lv371; double lv372; if (v35) { lv372 = 0.0; lv371 = 0.0; lv370 = 0.0; } else { const Int64 v36 = (max(v20, sh2) * shIn1_1 + max(v20, v34)) * shIn1_0 + max(v20, sh0); lv372 = arrIn1_2[v36]; lv371 = arrIn1_1[v36]; lv370 = arrIn1_0[v36]; } ; lv371; }) * v33; ; fmin(({ const double v43 = ({ const Int64 v39 = (Int64) -1 + sh1; const Word8 v40 = sh0 >= shIn0_0 || (v39 >= shIn0_0 || sh2 >= shIn0_0); double lv420; double lv421; double lv422; if (v40) { lv422 = 0.0; lv421 = 0.0; lv420 = 0.0; } else { const Int64 v41 = (max(v20, sh2) * shIn1_1 + max(v20, v39)) * shIn1_0 + max(v20, sh0); lv422 = arrIn1_2[v41]; lv421 = arrIn1_1[v41]; lv420 = arrIn1_0[v41]; } ; lv421; }) * v33; ; fmin(v43, 0.5 * (v43 + v38)); }), v38); })));
const double v57 = fmin(2.0, fmax(0.0, ({ const double v44 = 1.0 / (1.0e-36 + v9); const double v49 = ({ const Int64 v45 = v21 + sh2; const Word8 v46 = sh0 >= shIn0_0 || (sh1 >= shIn0_0 || v45 >= shIn0_0); double lv480; double lv481; double lv482; if (v46) { lv482 = 0.0; lv481 = 0.0; lv480 = 0.0; } else { const Int64 v47 = (max(v20, v45) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, sh0); lv482 = arrIn1_2[v47]; lv481 = arrIn1_1[v47]; lv480 = arrIn1_0[v47]; } ; lv480; }) * v44; ; fmin(({ const double v54 = ({ const Int64 v50 = (Int64) -1 + sh2; const Word8 v51 = sh0 >= shIn0_0 || (sh1 >= shIn0_0 || v50 >= shIn0_0); double lv530; double lv531; double lv532; if (v51) { lv532 = 0.0; lv531 = 0.0; lv530 = 0.0; } else { const Int64 v52 = (max(v20, v50) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, sh0); lv532 = arrIn1_2[v52]; lv531 = arrIn1_1[v52]; lv530 = arrIn1_0[v52]; } ; lv530; }) * v44; ; fmin(v54, 0.5 * (v54 + v49)); }), v49); })));
lv581 = -(0.5 * v5 * (v17 * (1.0 - v55) + v18 * (1.0 - v56) + v19 * (1.0 - v57)));
lv580 = 0.6666666666666666 * v5 * (v17 * v17 * (1.0 - v55 * v55) + v18 * v18 * (1.0 - v56 * v56) + v19 * v19 * (1.0 - v57 * v57));
}
arrOut_1[ix] = lv581;
arrOut_0[ix] = lv580;
}
}
| 06dcb133a8c8e8244a267de181fc46a33f4b9511.cu | #include <accelerate_cuda.h>
extern "C" __global__ void generate(const Int64 shIn0_2, const Int64 shIn0_1, const Int64 shIn0_0, const double* __restrict__ arrIn0_0, const Int64 shIn1_2, const Int64 shIn1_1, const Int64 shIn1_0, const double* __restrict__ arrIn1_5, const double* __restrict__ arrIn1_4, const double* __restrict__ arrIn1_3, const double* __restrict__ arrIn1_2, const double* __restrict__ arrIn1_1, const double* __restrict__ arrIn1_0, const Int64 shIn2_2, const Int64 shIn2_1, const Int64 shIn2_0, const double* __restrict__ arrIn2_3, const double* __restrict__ arrIn2_2, const double* __restrict__ arrIn2_1, const double* __restrict__ arrIn2_0, const Int64 shOut_2, const Int64 shOut_1, const Int64 shOut_0, double* __restrict__ arrOut_1, double* __restrict__ arrOut_0)
{
const int shapeSize = shOut_2 * (shOut_1 * shOut_0);
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 tmp_0 = ix;
const Int64 tmp_1 = tmp_0 / shOut_0;
const Int64 tmp_2 = tmp_1 / shOut_1;
const Int64 sh2 = tmp_2 % shOut_2;
const Int64 sh1 = tmp_1 % shOut_1;
const Int64 sh0 = tmp_0 % shOut_0;
const Word8 v1 = ({ const Int64 v0 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; ; arrIn2_1[v0]; }) > 0.0;
double lv580;
double lv581;
if (v1) {
lv581 = 0.0;
lv580 = 0.0;
} else {
const double v5 = ({ const Int64 v2 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v2]; }) / (({ const Int64 v3 = (sh2 * shIn0_1 + sh1) * shIn0_0 + sh0; ; arrIn0_0[v3]; }) * ({ const Int64 v4 = (sh2 * shIn2_1 + sh1) * shIn2_0 + sh0; ; arrIn2_3[v4]; }));
const Int64 v6 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
const double v7 = arrIn1_2[v6];
const double v8 = arrIn1_1[v6];
const double v9 = arrIn1_0[v6];
const Int64 v10 = (sh2 * shIn1_1 + sh1) * shIn1_0 + sh0;
const double v11 = arrIn1_5[v10];
const double v12 = arrIn1_4[v10];
const double v13 = arrIn1_3[v10];
const double v14 = v11 * v7;
const double v15 = v12 * v8;
const double v16 = v13 * v9;
const double v17 = fmin(0.0, v14);
const double v18 = fmin(0.0, v15);
const double v19 = fmin(0.0, v16);
const Int64 v20 = (Int64) 0;
const Int64 v21 = (Int64) 1;
const double v55 = fmin(2.0, fmax(0.0, ({ const double v22 = 1.0 / (1.0e-36 + v7); const double v27 = ({ const Int64 v23 = v21 + sh0; const Word8 v24 = v23 >= shIn0_0 || (sh1 >= shIn0_0 || sh2 >= shIn0_0); double lv260; double lv261; double lv262; if (v24) { lv262 = 0.0; lv261 = 0.0; lv260 = 0.0; } else { const Int64 v25 = (max(v20, sh2) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, v23); lv262 = arrIn1_2[v25]; lv261 = arrIn1_1[v25]; lv260 = arrIn1_0[v25]; } ; lv262; }) * v22; ; fmin(({ const double v32 = ({ const Int64 v28 = (Int64) -1 + sh0; const Word8 v29 = v28 >= shIn0_0 || (sh1 >= shIn0_0 || sh2 >= shIn0_0); double lv310; double lv311; double lv312; if (v29) { lv312 = 0.0; lv311 = 0.0; lv310 = 0.0; } else { const Int64 v30 = (max(v20, sh2) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, v28); lv312 = arrIn1_2[v30]; lv311 = arrIn1_1[v30]; lv310 = arrIn1_0[v30]; } ; lv312; }) * v22; ; fmin(v32, 0.5 * (v32 + v27)); }), v27); })));
const double v56 = fmin(2.0, fmax(0.0, ({ const double v33 = 1.0 / (1.0e-36 + v8); const double v38 = ({ const Int64 v34 = v21 + sh1; const Word8 v35 = sh0 >= shIn0_0 || (v34 >= shIn0_0 || sh2 >= shIn0_0); double lv370; double lv371; double lv372; if (v35) { lv372 = 0.0; lv371 = 0.0; lv370 = 0.0; } else { const Int64 v36 = (max(v20, sh2) * shIn1_1 + max(v20, v34)) * shIn1_0 + max(v20, sh0); lv372 = arrIn1_2[v36]; lv371 = arrIn1_1[v36]; lv370 = arrIn1_0[v36]; } ; lv371; }) * v33; ; fmin(({ const double v43 = ({ const Int64 v39 = (Int64) -1 + sh1; const Word8 v40 = sh0 >= shIn0_0 || (v39 >= shIn0_0 || sh2 >= shIn0_0); double lv420; double lv421; double lv422; if (v40) { lv422 = 0.0; lv421 = 0.0; lv420 = 0.0; } else { const Int64 v41 = (max(v20, sh2) * shIn1_1 + max(v20, v39)) * shIn1_0 + max(v20, sh0); lv422 = arrIn1_2[v41]; lv421 = arrIn1_1[v41]; lv420 = arrIn1_0[v41]; } ; lv421; }) * v33; ; fmin(v43, 0.5 * (v43 + v38)); }), v38); })));
const double v57 = fmin(2.0, fmax(0.0, ({ const double v44 = 1.0 / (1.0e-36 + v9); const double v49 = ({ const Int64 v45 = v21 + sh2; const Word8 v46 = sh0 >= shIn0_0 || (sh1 >= shIn0_0 || v45 >= shIn0_0); double lv480; double lv481; double lv482; if (v46) { lv482 = 0.0; lv481 = 0.0; lv480 = 0.0; } else { const Int64 v47 = (max(v20, v45) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, sh0); lv482 = arrIn1_2[v47]; lv481 = arrIn1_1[v47]; lv480 = arrIn1_0[v47]; } ; lv480; }) * v44; ; fmin(({ const double v54 = ({ const Int64 v50 = (Int64) -1 + sh2; const Word8 v51 = sh0 >= shIn0_0 || (sh1 >= shIn0_0 || v50 >= shIn0_0); double lv530; double lv531; double lv532; if (v51) { lv532 = 0.0; lv531 = 0.0; lv530 = 0.0; } else { const Int64 v52 = (max(v20, v50) * shIn1_1 + max(v20, sh1)) * shIn1_0 + max(v20, sh0); lv532 = arrIn1_2[v52]; lv531 = arrIn1_1[v52]; lv530 = arrIn1_0[v52]; } ; lv530; }) * v44; ; fmin(v54, 0.5 * (v54 + v49)); }), v49); })));
lv581 = -(0.5 * v5 * (v17 * (1.0 - v55) + v18 * (1.0 - v56) + v19 * (1.0 - v57)));
lv580 = 0.6666666666666666 * v5 * (v17 * v17 * (1.0 - v55 * v55) + v18 * v18 * (1.0 - v56 * v56) + v19 * v19 * (1.0 - v57 * v57));
}
arrOut_1[ix] = lv581;
arrOut_0[ix] = lv580;
}
}
|
7bbcd2cc25f55ccd3a7815ce32be9e717c03893c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
const float LOG_MIN_VALUE = 0.00000001f;
Metrics::Metrics(LossType _loss_type, const std::vector<MetricsType>& metrics)
: measure_accuracy(false),
measure_categorical_crossentropy(false),
measure_sparse_categorical_crossentropy(false),
measure_mean_squared_error(false),
measure_root_mean_squared_error(false),
measure_mean_absolute_error(false),
loss_type(_loss_type)
{
for (size_t i = 0; i < metrics.size(); i++) {
switch (metrics[i]) {
case METRICS_ACCURACY:
measure_accuracy = true;
continue;
case METRICS_CATEGORICAL_CROSSENTROPY:
measure_categorical_crossentropy = true;
continue;
case METRICS_SPARSE_CATEGORICAL_CROSSENTROPY:
measure_sparse_categorical_crossentropy = true;
continue;
case METRICS_MEAN_SQUARED_ERROR:
measure_mean_squared_error = true;
continue;
case METRICS_ROOT_MEAN_SQUARED_ERROR:
measure_root_mean_squared_error = true;
continue;
case METRICS_MEAN_ABSOLUTE_ERROR:
measure_mean_absolute_error = true;
continue;
default:
fprintf(stderr, "Unrecognized metrics type\n");
assert(false);
}
}
}
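// Example construction (a sketch; the chosen loss and metric values are illustrative,
// taken from the enums referenced elsewhere in this file):
//
//   std::vector<MetricsType> wanted = {METRICS_ACCURACY, METRICS_MEAN_SQUARED_ERROR};
//   Metrics metrics(LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, wanted);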
__global__
void update_metrics_sparse_label_kernel(
const float* logits,
const int* labels,
PerfMetrics* perf,
const Metrics metrics,
int num_samples,
int num_classes)
{
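// CUDA_KERNEL_LOOP comes from cuda_helper.h and is assumed to expand to a grid-stride
// loop, so each thread visits every (blockDim.x * gridDim.x)-th sample index b.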
CUDA_KERNEL_LOOP(b, num_samples)
{
if (metrics.measure_accuracy) {
float max_val = -1.0f;
int my_label = -1;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b*num_classes+i];
if (my_logit > max_val) {
max_val = my_logit;
my_label = i;
}
}
assert(my_label >= 0);
atomicAdd(&(perf->train_all), 1);
if (labels[b] == my_label)
atomicAdd(&(perf->train_correct), 1);
}
if (metrics.measure_sparse_categorical_crossentropy) {
float my_logit = max(logits[b*num_classes+labels[b]], LOG_MIN_VALUE);
atomicAdd(&(perf->sparse_cce_loss), -log(my_logit));
}
if (metrics.measure_mean_squared_error
|| metrics.measure_root_mean_squared_error
|| metrics.measure_mean_absolute_error)
{
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b*num_classes+i];
float my_label = (labels[b] == i) ? 1.0f : 0.0f;
mse += (my_logit - my_label) * (my_logit - my_label);
mae += abs(my_logit - my_label);
}
if (metrics.measure_mean_squared_error)
atomicAdd(&(perf->mse_loss), mse);
if (metrics.measure_root_mean_squared_error)
atomicAdd(&(perf->rmse_loss), sqrt(mse));
if (metrics.measure_mean_absolute_error)
atomicAdd(&(perf->mae_loss), mae);
}
}
}
__global__
void update_metrics_label_kernel(
const float* logits,
const float* labels,
PerfMetrics* perf,
const Metrics metrics,
int num_samples,
int num_classes)
{
CUDA_KERNEL_LOOP(b, num_samples)
{
atomicAdd(&(perf->train_all), 1);
if (metrics.measure_accuracy) {
if (num_classes == 1) {
// accuracy does not make sense when num_classes = 1
// we just return 100%
atomicAdd(&(perf->train_all), 1);
atomicAdd(&(perf->train_correct), 1);
} else {
float max_val = 0.0f;
int my_label = -1, true_label = -1;
for (int i = 0; i < num_classes; i++) {
if (my_label == -1 || logits[b*num_classes+i] > max_val) {
max_val = logits[b*num_classes+i];
my_label = i;
}
if (labels[b*num_classes+i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
//printf("[%d] logit(%.4lf) label(%.4lf) my_label(%d) num_classes(%d)\n", b, logits[b], labels[b], my_label, num_classes);
assert(my_label >= 0);
assert(true_label >= 0);
if (true_label == my_label)
atomicAdd(&(perf->train_correct), 1);
}
}
if (metrics.measure_categorical_crossentropy) {
float cce = 0.0f;
for (int i = 0; i < num_classes; i++) {
if (labels[b*num_classes+i] > 0.0f) {
float my_logit = max(logits[b*num_classes+i], LOG_MIN_VALUE);
cce += labels[b*num_classes+i] * -log(my_logit);
}
}
atomicAdd(&(perf->cce_loss), cce);
}
if (metrics.measure_mean_squared_error
|| metrics.measure_root_mean_squared_error
|| metrics.measure_mean_absolute_error)
{
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float diff = logits[b*num_classes+i] - labels[b*num_classes+i];
mse += diff * diff;
mae += abs(diff);
}
if (metrics.measure_mean_squared_error)
atomicAdd(&(perf->mse_loss), mse);
if (metrics.measure_root_mean_squared_error)
atomicAdd(&(perf->rmse_loss), sqrt(mse));
if (metrics.measure_mean_absolute_error)
atomicAdd(&(perf->mae_loss), mae);
}
}
}
__host__
PerfMetrics Metrics::compute_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return compute_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
PerfMetrics invalid;
return invalid;
}
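// The DIMFUNC / LEGION_FOREACH_N macro pair above instantiates one switch case per
// supported tensor rank and dispatches to the rank-templated implementation below;
// Metrics::compute further down uses the same pattern.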
template<int NDIM>
__host__
PerfMetrics Metrics::compute_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Metrics* me = (Metrics*) task->args;
PerfMetrics* perf;
PerfMetrics perf_zc;
checkCUDA(hipMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(hipMemcpy(perf, &perf_zc, sizeof(PerfMetrics), hipMemcpyHostToDevice));
if (me->loss_type == LOSS_SPARSE_CATEGORICAL_CROSSENTROPY) {
TensorAccessorR<float, NDIM> acc_logit(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<int, NDIM> acc_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
int num_samples = acc_logit.rect.hi[NDIM-1] - acc_logit.rect.lo[NDIM-1] + 1;
int num_classes = acc_logit.rect.volume() / num_samples;
for (int i = 1; i < NDIM; i++) {
assert(acc_label.rect.hi[i] == acc_logit.rect.hi[i]);
assert(acc_label.rect.lo[i] == acc_logit.rect.lo[i]);
}
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
// Cannot measure categorical_crossentropy w/ sparse labels
// Use measure_sparse_categorical_crossentropy instead
assert(!me->measure_categorical_crossentropy);
hipLaunchKernelGGL(( update_metrics_sparse_label_kernel), dim3(GET_BLOCKS(num_samples)), dim3(CUDA_NUM_THREADS), 0, 0,
acc_logit.ptr, acc_label.ptr, perf, *me, num_samples, num_classes);
} else {
TensorAccessorR<float, NDIM> acc_logit(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, NDIM> acc_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// other loss require label and logit have identical shape
assert(acc_logit.rect == acc_label.rect);
int num_samples = acc_logit.rect.hi[NDIM-1] - acc_logit.rect.lo[NDIM-1] + 1;
int num_classes = acc_logit.rect.volume() / num_samples;
// Using CUDA_NUM_THREADS here may run out of resources, so we set #threads=256
hipLaunchKernelGGL(( update_metrics_label_kernel), dim3(GET_BLOCKS(num_samples)), dim3(256), 0, 0,
acc_logit.ptr, acc_label.ptr, perf, *me, num_samples, num_classes);
}
checkCUDA(hipMemcpy(&perf_zc, perf, sizeof(PerfMetrics), hipMemcpyDeviceToHost));
checkCUDA(hipFree(perf));
return perf_zc;
}
void Metrics::compute(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
assert(logit->numDim == label->numDim);
int dim = logit->numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
compute_with_dim<DIM>(model, logit, label); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
}
}
}
template<int NDIM>
void Metrics::compute_with_dim(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
// Use the same parallel strategy as the owner of logit
std::string pcname = logit->owner_op->name;
IndexSpaceT<NDIM> task_is = IndexSpaceT<NDIM>(
model->get_or_create_task_is(NDIM, pcname));
Context ctx = model->config.lg_ctx;
Runtime* runtime = model->config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
Rect<NDIM> logit_rect = runtime->get_index_partition_color_space(
ctx, logit->part.get_index_partition());
Rect<NDIM> label_rect = runtime->get_index_partition_color_space(
ctx, label->part.get_index_partition());
if((logit_rect != part_rect) || (label_rect != part_rect)) {
fprintf(stderr, "Encounter inconsistency in parallelizing loss computation\n");
assert(false);
}
ArgumentMap argmap;
IndexLauncher launcher(METRICS_COMP_TASK_ID, task_is,
TaskArgument(this, sizeof(Metrics)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(pcname));
launcher.add_region_requirement(
RegionRequirement(logit->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, logit->region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(label->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, label->region));
launcher.add_field(1, FID_DATA);
FutureMap new_metrics = runtime->execute_index_space(ctx, launcher);
// Update metrics
TaskLauncher metrics_task(UPDATE_METRICS_TASK_ID, TaskArgument(this, sizeof(Metrics)));
metrics_task.add_future(model->current_metrics);
for (PointInRectIterator<NDIM> it(part_rect); it(); it++) {
metrics_task.add_future(new_metrics[*it]);
}
model->current_metrics = runtime->execute_task(ctx, metrics_task);
}
| 7bbcd2cc25f55ccd3a7815ce32be9e717c03893c.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
const float LOG_MIN_VALUE = 0.00000001f;
Metrics::Metrics(LossType _loss_type, const std::vector<MetricsType>& metrics)
: measure_accuracy(false),
measure_categorical_crossentropy(false),
measure_sparse_categorical_crossentropy(false),
measure_mean_squared_error(false),
measure_root_mean_squared_error(false),
measure_mean_absolute_error(false),
loss_type(_loss_type)
{
for (size_t i = 0; i < metrics.size(); i++) {
switch (metrics[i]) {
case METRICS_ACCURACY:
measure_accuracy = true;
continue;
case METRICS_CATEGORICAL_CROSSENTROPY:
measure_categorical_crossentropy = true;
continue;
case METRICS_SPARSE_CATEGORICAL_CROSSENTROPY:
measure_sparse_categorical_crossentropy = true;
continue;
case METRICS_MEAN_SQUARED_ERROR:
measure_mean_squared_error = true;
continue;
case METRICS_ROOT_MEAN_SQUARED_ERROR:
measure_root_mean_squared_error = true;
continue;
case METRICS_MEAN_ABSOLUTE_ERROR:
measure_mean_absolute_error = true;
continue;
default:
fprintf(stderr, "Unrecognized metrics type\n");
assert(false);
}
}
}
__global__
void update_metrics_sparse_label_kernel(
const float* logits,
const int* labels,
PerfMetrics* perf,
const Metrics metrics,
int num_samples,
int num_classes)
{
CUDA_KERNEL_LOOP(b, num_samples)
{
if (metrics.measure_accuracy) {
float max_val = -1.0f;
int my_label = -1;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b*num_classes+i];
if (my_logit > max_val) {
max_val = my_logit;
my_label = i;
}
}
assert(my_label >= 0);
atomicAdd(&(perf->train_all), 1);
if (labels[b] == my_label)
atomicAdd(&(perf->train_correct), 1);
}
if (metrics.measure_sparse_categorical_crossentropy) {
float my_logit = max(logits[b*num_classes+labels[b]], LOG_MIN_VALUE);
atomicAdd(&(perf->sparse_cce_loss), -log(my_logit));
}
if (metrics.measure_mean_squared_error
|| metrics.measure_root_mean_squared_error
|| metrics.measure_mean_absolute_error)
{
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float my_logit = logits[b*num_classes+i];
float my_label = (labels[b] == i) ? 1.0f : 0.0f;
mse += (my_logit - my_label) * (my_logit - my_label);
mae += abs(my_logit - my_label);
}
if (metrics.measure_mean_squared_error)
atomicAdd(&(perf->mse_loss), mse);
if (metrics.measure_root_mean_squared_error)
atomicAdd(&(perf->rmse_loss), sqrt(mse));
if (metrics.measure_mean_absolute_error)
atomicAdd(&(perf->mae_loss), mae);
}
}
}
__global__
void update_metrics_label_kernel(
const float* logits,
const float* labels,
PerfMetrics* perf,
const Metrics metrics,
int num_samples,
int num_classes)
{
CUDA_KERNEL_LOOP(b, num_samples)
{
atomicAdd(&(perf->train_all), 1);
if (metrics.measure_accuracy) {
if (num_classes == 1) {
// accuracy does not make sense when num_classes = 1
// we just return 100%
atomicAdd(&(perf->train_all), 1);
atomicAdd(&(perf->train_correct), 1);
} else {
float max_val = 0.0f;
int my_label = -1, true_label = -1;
for (int i = 0; i < num_classes; i++) {
if (my_label == -1 || logits[b*num_classes+i] > max_val) {
max_val = logits[b*num_classes+i];
my_label = i;
}
if (labels[b*num_classes+i] > 0.9f) {
assert(true_label == -1);
true_label = i;
}
}
//printf("[%d] logit(%.4lf) label(%.4lf) my_label(%d) num_classes(%d)\n", b, logits[b], labels[b], my_label, num_classes);
assert(my_label >= 0);
assert(true_label >= 0);
if (true_label == my_label)
atomicAdd(&(perf->train_correct), 1);
}
}
if (metrics.measure_categorical_crossentropy) {
float cce = 0.0f;
for (int i = 0; i < num_classes; i++) {
if (labels[b*num_classes+i] > 0.0f) {
float my_logit = max(logits[b*num_classes+i], LOG_MIN_VALUE);
cce += labels[b*num_classes+i] * -log(my_logit);
}
}
atomicAdd(&(perf->cce_loss), cce);
}
if (metrics.measure_mean_squared_error
|| metrics.measure_root_mean_squared_error
|| metrics.measure_mean_absolute_error)
{
float mse = 0.0f, mae = 0.0f;
for (int i = 0; i < num_classes; i++) {
float diff = logits[b*num_classes+i] - labels[b*num_classes+i];
mse += diff * diff;
mae += abs(diff);
}
if (metrics.measure_mean_squared_error)
atomicAdd(&(perf->mse_loss), mse);
if (metrics.measure_root_mean_squared_error)
atomicAdd(&(perf->rmse_loss), sqrt(mse));
if (metrics.measure_mean_absolute_error)
atomicAdd(&(perf->mae_loss), mae);
}
}
}
__host__
PerfMetrics Metrics::compute_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return compute_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
PerfMetrics invalid;
return invalid;
}
template<int NDIM>
__host__
PerfMetrics Metrics::compute_task_with_dim(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Metrics* me = (Metrics*) task->args;
PerfMetrics* perf;
PerfMetrics perf_zc;
checkCUDA(cudaMalloc(&perf, sizeof(PerfMetrics)));
checkCUDA(cudaMemcpy(perf, &perf_zc, sizeof(PerfMetrics), cudaMemcpyHostToDevice));
if (me->loss_type == LOSS_SPARSE_CATEGORICAL_CROSSENTROPY) {
TensorAccessorR<float, NDIM> acc_logit(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<int, NDIM> acc_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
int num_samples = acc_logit.rect.hi[NDIM-1] - acc_logit.rect.lo[NDIM-1] + 1;
int num_classes = acc_logit.rect.volume() / num_samples;
for (int i = 1; i < NDIM; i++) {
assert(acc_label.rect.hi[i] == acc_logit.rect.hi[i]);
assert(acc_label.rect.lo[i] == acc_logit.rect.lo[i]);
}
assert(acc_label.rect.lo[0] == acc_label.rect.hi[0]);
// Cannot measure categorical_crossentropy w/ sparse labels
// Use measure_sparse_categorical_crossentropy instead
assert(!me->measure_categorical_crossentropy);
update_metrics_sparse_label_kernel<<<GET_BLOCKS(num_samples), CUDA_NUM_THREADS>>>(
acc_logit.ptr, acc_label.ptr, perf, *me, num_samples, num_classes);
} else {
TensorAccessorR<float, NDIM> acc_logit(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorR<float, NDIM> acc_label(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// other loss require label and logit have identical shape
assert(acc_logit.rect == acc_label.rect);
int num_samples = acc_logit.rect.hi[NDIM-1] - acc_logit.rect.lo[NDIM-1] + 1;
int num_classes = acc_logit.rect.volume() / num_samples;
// Use CUDA_NUM_THREADS may result in out of resources so we set #threads=256
update_metrics_label_kernel<<<GET_BLOCKS(num_samples), 256>>>(
acc_logit.ptr, acc_label.ptr, perf, *me, num_samples, num_classes);
}
checkCUDA(cudaMemcpy(&perf_zc, perf, sizeof(PerfMetrics), cudaMemcpyDeviceToHost));
checkCUDA(cudaFree(perf));
return perf_zc;
}
void Metrics::compute(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
assert(logit->numDim == label->numDim);
int dim = logit->numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
compute_with_dim<DIM>(model, logit, label); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
assert(false);
}
}
}
template<int NDIM>
void Metrics::compute_with_dim(FFModel* model,
const Tensor* logit,
const Tensor* label)
{
// Use the same parallel strategy as the owner of logit
std::string pcname = logit->owner_op->name;
IndexSpaceT<NDIM> task_is = IndexSpaceT<NDIM>(
model->get_or_create_task_is(NDIM, pcname));
Context ctx = model->config.lg_ctx;
Runtime* runtime = model->config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
Rect<NDIM> logit_rect = runtime->get_index_partition_color_space(
ctx, logit->part.get_index_partition());
Rect<NDIM> label_rect = runtime->get_index_partition_color_space(
ctx, label->part.get_index_partition());
if((logit_rect != part_rect) || (label_rect != part_rect)) {
fprintf(stderr, "Encounter inconsistency in parallelizing loss computation\n");
assert(false);
}
ArgumentMap argmap;
IndexLauncher launcher(METRICS_COMP_TASK_ID, task_is,
TaskArgument(this, sizeof(Metrics)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(pcname));
launcher.add_region_requirement(
RegionRequirement(logit->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, logit->region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(label->part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, label->region));
launcher.add_field(1, FID_DATA);
FutureMap new_metrics = runtime->execute_index_space(ctx, launcher);
// Update metrics
TaskLauncher metrics_task(UPDATE_METRICS_TASK_ID, TaskArgument(this, sizeof(Metrics)));
metrics_task.add_future(model->current_metrics);
for (PointInRectIterator<NDIM> it(part_rect); it(); it++) {
metrics_task.add_future(new_metrics[*it]);
}
model->current_metrics = runtime->execute_task(ctx, metrics_task);
}
|
1833e24e42aa0564b023bdcc008e91d78feeab59.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements G4Helper::set.
#include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh"
#include <algorithm>
#include <array>
#include <mutex>
#include <stdexcept>
namespace dca {
namespace phys {
namespace solver {
namespace accumulator {
namespace details {
// dca::phys::solver::accumulator::details::
__device__ __constant__ G4Helper g4_helper;
void G4Helper::set(int nb, int nk, int nw_pos, const std::vector<int>& delta_k,
const std::vector<int>& delta_w, const int* add_k, int lda, const int* sub_k,
int lds, int k0) {
static std::once_flag flag;
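  // The lambda below runs exactly once per process: it uploads the index
  // tables to device memory and copies the filled host_helper into the
  // __constant__ g4_helper symbol used by the accumulation kernels.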
std::call_once(flag, [=]() {
G4Helper host_helper;
host_helper.lda_ = lda;
host_helper.lds_ = lds;
host_helper.nw_pos_ = nw_pos;
host_helper.k0_ = k0;
host_helper.ext_size_ = 0;
for (const int idx : delta_w)
host_helper.ext_size_ = ::max(host_helper.ext_size_, std::abs(idx));
const int nb4 = nb * nb * nb * nb;
const int nk3 = nk * nk * delta_k.size();
const int nw = 2 * nw_pos;
const std::array<int, 10> steps{1,
nb,
nb * nb,
nb * nb * nb,
nb4,
nb4 * nk,
nb4 * nk * nk,
nb4 * nk3,
nb4 * nk3 * nw,
nb4 * nk3 * nw * nw};
std::copy_n(steps.data(), steps.size(), host_helper.sbdm_steps_);
hipMalloc(&host_helper.add_matrix_, sizeof(int) * lda * nk);
hipMemcpy(host_helper.add_matrix_, add_k, sizeof(int) * lda * nk, hipMemcpyHostToDevice);
hipMalloc(&host_helper.sub_matrix_, sizeof(int) * lds * nk);
hipMemcpy(host_helper.sub_matrix_, sub_k, sizeof(int) * lds * nk, hipMemcpyHostToDevice);
hipMalloc(&host_helper.w_ex_indices_, sizeof(int) * delta_w.size());
hipMemcpy(host_helper.w_ex_indices_, delta_w.data(), sizeof(int) * delta_w.size(),
hipMemcpyHostToDevice);
hipMalloc(&host_helper.k_ex_indices_, sizeof(int) * delta_k.size());
hipMemcpy(host_helper.k_ex_indices_, delta_k.data(), sizeof(int) * delta_k.size(),
hipMemcpyHostToDevice);
hipMemcpyToSymbol(g4_helper, &host_helper, sizeof(G4Helper));
});
}
} // namespace details
} // namespace accumulator
} // namespace solver
} // namespace phys
} // namespace dca
| 1833e24e42aa0564b023bdcc008e91d78feeab59.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements G4Helper::set.
#include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/tp/g4_helper.cuh"
#include <algorithm>
#include <array>
#include <mutex>
#include <stdexcept>
namespace dca {
namespace phys {
namespace solver {
namespace accumulator {
namespace details {
// dca::phys::solver::accumulator::details::
__device__ __constant__ G4Helper g4_helper;
void G4Helper::set(int nb, int nk, int nw_pos, const std::vector<int>& delta_k,
const std::vector<int>& delta_w, const int* add_k, int lda, const int* sub_k,
int lds, int k0) {
static std::once_flag flag;
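  // The lambda below runs exactly once per process: it uploads the index
  // tables to device memory and copies the filled host_helper into the
  // __constant__ g4_helper symbol used by the accumulation kernels.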
std::call_once(flag, [=]() {
G4Helper host_helper;
host_helper.lda_ = lda;
host_helper.lds_ = lds;
host_helper.nw_pos_ = nw_pos;
host_helper.k0_ = k0;
host_helper.ext_size_ = 0;
for (const int idx : delta_w)
host_helper.ext_size_ = std::max(host_helper.ext_size_, std::abs(idx));
const int nb4 = nb * nb * nb * nb;
const int nk3 = nk * nk * delta_k.size();
const int nw = 2 * nw_pos;
const std::array<int, 10> steps{1,
nb,
nb * nb,
nb * nb * nb,
nb4,
nb4 * nk,
nb4 * nk * nk,
nb4 * nk3,
nb4 * nk3 * nw,
nb4 * nk3 * nw * nw};
std::copy_n(steps.data(), steps.size(), host_helper.sbdm_steps_);
cudaMalloc(&host_helper.add_matrix_, sizeof(int) * lda * nk);
cudaMemcpy(host_helper.add_matrix_, add_k, sizeof(int) * lda * nk, cudaMemcpyHostToDevice);
cudaMalloc(&host_helper.sub_matrix_, sizeof(int) * lds * nk);
cudaMemcpy(host_helper.sub_matrix_, sub_k, sizeof(int) * lds * nk, cudaMemcpyHostToDevice);
cudaMalloc(&host_helper.w_ex_indices_, sizeof(int) * delta_w.size());
cudaMemcpy(host_helper.w_ex_indices_, delta_w.data(), sizeof(int) * delta_w.size(),
cudaMemcpyHostToDevice);
cudaMalloc(&host_helper.k_ex_indices_, sizeof(int) * delta_k.size());
cudaMemcpy(host_helper.k_ex_indices_, delta_k.data(), sizeof(int) * delta_k.size(),
cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(g4_helper, &host_helper, sizeof(G4Helper));
});
}
} // namespace details
} // namespace accumulator
} // namespace solver
} // namespace phys
} // namespace dca
|
948b01241ab0c9bf89192ed26cacc19e514195ce.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void initialize_data(float * ip, int size)
//{
// time_t t;
// srand((unsigned) time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//__global__ void sum_array(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n",a[i],b[i],c[i]);
//}
//
////int main()
////{
//// int element_Count = 32;
////
//// size_t number_bytes = element_Count * sizeof(float);
////
//// float *h_a, *h_b, *host_ref, *gpu_ref;
////
//// h_a = (float *)malloc(number_bytes);
//// h_b = (float *)malloc(number_bytes);
//// host_ref = (float *)malloc(number_bytes);
//// gpu_ref = (float *)malloc(number_bytes);
////
//// initialize_data(h_a,element_Count);
//// initialize_data(h_b, element_Count);
////
//// memset(host_ref,0,number_bytes);
//// memset(gpu_ref,0,number_bytes);
////
//// float *d_a, *d_b, *d_c;
//// hipMalloc((float **)&d_a,number_bytes);
//// hipMalloc((float **)&d_b, number_bytes);
//// hipMalloc((float **)&d_c, number_bytes);
////
//// hipMemcpy(d_a,h_a,number_bytes,hipMemcpyHostToDevice);
//// hipMemcpy(d_b, h_b ,number_bytes, hipMemcpyHostToDevice);
////
//// dim3 block(element_Count);
//// dim3 grid(element_Count/block.x);
////
//// sum_array << <grid,block >> > (d_a,d_b,d_c);
////
//// hipMemcpy(gpu_ref,d_c,number_bytes,hipMemcpyDeviceToHost);
////
//// hipFree(d_a);
//// hipFree(d_b);
//// hipFree(d_c);
////
//// free(h_a);
//// free(h_b);
//// free(host_ref);
//// free(gpu_ref);
////
//// system("pause");
//// return 0;
////} | 948b01241ab0c9bf89192ed26cacc19e514195ce.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void initialize_data(float * ip, int size)
//{
// time_t t;
// srand((unsigned) time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//__global__ void sum_array(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n",a[i],b[i],c[i]);
//}
//
////int main()
////{
//// int element_Count = 32;
////
//// size_t number_bytes = element_Count * sizeof(float);
////
//// float *h_a, *h_b, *host_ref, *gpu_ref;
////
//// h_a = (float *)malloc(number_bytes);
//// h_b = (float *)malloc(number_bytes);
//// host_ref = (float *)malloc(number_bytes);
//// gpu_ref = (float *)malloc(number_bytes);
////
//// initialize_data(h_a,element_Count);
//// initialize_data(h_b, element_Count);
////
//// memset(host_ref,0,number_bytes);
//// memset(gpu_ref,0,number_bytes);
////
//// float *d_a, *d_b, *d_c;
//// cudaMalloc((float **)&d_a,number_bytes);
//// cudaMalloc((float **)&d_b, number_bytes);
//// cudaMalloc((float **)&d_c, number_bytes);
////
//// cudaMemcpy(d_a,h_a,number_bytes,cudaMemcpyHostToDevice);
//// cudaMemcpy(d_b, h_b ,number_bytes, cudaMemcpyHostToDevice);
////
//// dim3 block(element_Count);
//// dim3 grid(element_Count/block.x);
////
//// sum_array << <grid,block >> > (d_a,d_b,d_c);
////
//// cudaMemcpy(gpu_ref,d_c,number_bytes,cudaMemcpyDeviceToHost);
////
//// cudaFree(d_a);
//// cudaFree(d_b);
//// cudaFree(d_c);
////
//// free(h_a);
//// free(h_b);
//// free(host_ref);
//// free(gpu_ref);
////
//// system("pause");
//// return 0;
////} |
e24d060447270ea99407a47df0ff79ef505c74e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <iostream>
#include <cstddef>
#include <iomanip>
#include <chrono>
int main()
{
size_t cols = 1 << 10;
size_t rows = 1 << 10;
size_t N = cols * rows;
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
hipblasHandle_t handle;
hipblasCreate(&handle);
float_t* matrix;
hipHostMalloc((void**)&matrix, N * sizeof(float_t));
for (int i = 0; i < N; ++i)
matrix[i] = static_cast<float_t>(i);
float_t* matrix_in_dev;
hipMalloc((void**)&matrix_in_dev, N * sizeof(float_t));
float_t* matrix_out_dev;
hipMalloc((void**)&matrix_out_dev, N * sizeof(float_t));
hipblasSetMatrix(rows, cols, sizeof(float_t), matrix, rows, matrix_in_dev, rows);
float_t alpha = 1.; // change sample
float_t beta = 0.;
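    // With alpha = 1 and beta = 0, geam computes C = op(A) = A^T, i.e. an
    // out-of-place transpose of matrix_in_dev into matrix_out_dev.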
hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, cols, rows, &alpha, matrix_in_dev, rows, &beta, matrix_in_dev, rows, matrix_out_dev, cols);
hipblasGetMatrix(rows, cols, sizeof(float_t), matrix_out_dev, rows, matrix, rows);
hipStreamSynchronize(nullptr);
std::chrono::high_resolution_clock::time_point stop = std::chrono::high_resolution_clock::now();
std::chrono::duration<double_t> time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>(stop - start);
// for (int i = 0; i < rows; ++i)
// {
// for (int j = 0; j < cols; ++j)
// std::cout << std::setw(10) << static_cast<float>(matrix[j + i * rows]) << "\t";
// std::cout << "\n";
// }
hipHostFree(matrix);
hipFree(matrix_in_dev);
hipFree(matrix_out_dev);
hipblasDestroy(handle);
std::cout << "Matrix transpose (s) " << " - " << time_span.count() << "\n";
hipblasHandle_t handle1;
hipblasCreate(&handle1);
start = std::chrono::high_resolution_clock::now();
float_t* vecA;
hipHostMalloc((void**)&vecA, N * sizeof(float_t));
float_t* vecB;
hipHostMalloc((void**)&vecB, N * sizeof(float_t));
for (int i = 0; i < N; ++i)
{
vecA[i] = (float_t)i;
vecB[i] = (float_t)(i * 2 - 1);
}
float_t* aDev;
hipMalloc((void**)&aDev, N * sizeof(float_t));
float_t* bDev;
hipMalloc((void**)&bDev, N * sizeof(float_t));
hipblasSetMatrix(N, 1, sizeof(float_t), vecA, N, aDev, N);
hipblasSetMatrix(N, 1, sizeof(float_t), vecB, N, bDev, N);
alpha = 2.25;
hipblasSaxpy(handle1, N, &alpha, aDev, 1, bDev, 1);
hipblasGetMatrix(N, 1, sizeof(float_t), bDev, N, vecB, N);
hipStreamSynchronize(nullptr);
//for (int i = 0; i < N; ++i)
// printf("%f\n", vecB[i]);
hipblasDestroy(handle1);
hipHostFree(vecA);
hipHostFree(vecB);
hipFree(aDev);
hipFree(bDev);
stop = std::chrono::high_resolution_clock::now();
time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>(stop - start);
std::cout << "Saxpy time (s) " << " - " << time_span.count() << "\n";
} | e24d060447270ea99407a47df0ff79ef505c74e9.cu | #include <cublas_v2.h>
#include <iostream>
#include <cstddef>
#include <iomanip>
#include <chrono>
int main()
{
size_t cols = 1 << 10;
size_t rows = 1 << 10;
size_t N = cols * rows;
std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
cublasHandle_t handle;
cublasCreate(&handle);
float_t* matrix;
cudaMallocHost((void**)&matrix, N * sizeof(float_t));
for (int i = 0; i < N; ++i)
matrix[i] = static_cast<float_t>(i);
float_t* matrix_in_dev;
cudaMalloc((void**)&matrix_in_dev, N * sizeof(float_t));
float_t* matrix_out_dev;
cudaMalloc((void**)&matrix_out_dev, N * sizeof(float_t));
cublasSetMatrix(rows, cols, sizeof(float_t), matrix, rows, matrix_in_dev, rows);
float_t alpha = 1.; // change sample
float_t beta = 0.;
cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, cols, rows, &alpha, matrix_in_dev, rows, &beta, matrix_in_dev, rows, matrix_out_dev, cols);
cublasGetMatrix(rows, cols, sizeof(float_t), matrix_out_dev, rows, matrix, rows);
cudaStreamSynchronize(nullptr);
std::chrono::high_resolution_clock::time_point stop = std::chrono::high_resolution_clock::now();
std::chrono::duration<double_t> time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>(stop - start);
// for (int i = 0; i < rows; ++i)
// {
// for (int j = 0; j < cols; ++j)
// std::cout << std::setw(10) << static_cast<float>(matrix[j + i * rows]) << "\t";
// std::cout << "\n";
// }
cudaFreeHost(matrix);
cudaFree(matrix_in_dev);
cudaFree(matrix_out_dev);
cublasDestroy(handle);
std::cout << "Matrix transpose (s) " << " - " << time_span.count() << "\n";
cublasHandle_t handle1;
cublasCreate(&handle1);
start = std::chrono::high_resolution_clock::now();
float_t* vecA;
cudaMallocHost((void**)&vecA, N * sizeof(float_t));
float_t* vecB;
cudaMallocHost((void**)&vecB, N * sizeof(float_t));
for (int i = 0; i < N; ++i)
{
vecA[i] = (float_t)i;
vecB[i] = (float_t)(i * 2 - 1);
}
float_t* aDev;
cudaMalloc((void**)&aDev, N * sizeof(float_t));
float_t* bDev;
cudaMalloc((void**)&bDev, N * sizeof(float_t));
cublasSetMatrix(N, 1, sizeof(float_t), vecA, N, aDev, N);
cublasSetMatrix(N, 1, sizeof(float_t), vecB, N, bDev, N);
alpha = 2.25;
cublasSaxpy(handle1, N, &alpha, aDev, 1, bDev, 1);
cublasGetMatrix(N, 1, sizeof(float_t), bDev, N, vecB, N);
cudaStreamSynchronize(nullptr);
//for (int i = 0; i < N; ++i)
// printf("%f\n", vecB[i]);
cublasDestroy(handle1);
cudaFreeHost(vecA);
cudaFreeHost(vecB);
cudaFree(aDev);
cudaFree(bDev);
stop = std::chrono::high_resolution_clock::now();
time_span = std::chrono::duration_cast<std::chrono::duration<double_t>>(stop - start);
std::cout << "Saxpy time (s) " << " - " << time_span.count() << "\n";
} |
b1902f9436eb8ff679647ec702108e31d26df22f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by egi on 11/3/19.
//
#include "csr_adaptive_spmv.h"
#include "reduce_hip.cuh"
#define NNZ_PER_WG 64u ///< Should be power of two
template <typename data_type>
__global__ void fill_vector (unsigned int n, data_type *vec, data_type value)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
vec[i] = value;
}
__device__ unsigned int prev_power_of_2 (unsigned int n)
{
while (n & n - 1)
n = n & n - 1;
return n;
}
template <typename data_type>
__global__ void csr_adaptive_spmv_kernel (
const unsigned int n_rows,
const unsigned int *col_ids,
const unsigned int *row_ptr,
const unsigned int *row_blocks,
const data_type *data,
const data_type *x,
data_type *y)
{
const unsigned int block_row_begin = row_blocks[blockIdx.x];
const unsigned int block_row_end = row_blocks[blockIdx.x + 1];
const unsigned int nnz = row_ptr[block_row_end] - row_ptr[block_row_begin];
__shared__ data_type cache[NNZ_PER_WG];
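  // Each thread block processes one row block: blocks spanning several rows
  // take the CSR-Stream path below; single-row blocks use CSR-Vector (one
  // warp) when the row has few non-zeros, or CSR-VectorL (whole block) otherwise.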
if (block_row_end - block_row_begin > 1)
{
/// CSR-Stream case
const unsigned int i = threadIdx.x;
const unsigned int block_data_begin = row_ptr[block_row_begin];
const unsigned int thread_data_begin = block_data_begin + i;
if (i < nnz)
cache[i] = data[thread_data_begin] * x[col_ids[thread_data_begin]];
__syncthreads ();
const unsigned int threads_for_reduction = prev_power_of_2 (blockDim.x / (block_row_end - block_row_begin));
if (threads_for_reduction > 1)
{
/// Reduce all non zeroes of row by multiple thread
const unsigned int thread_in_block = i % threads_for_reduction;
const unsigned int local_row = block_row_begin + i / threads_for_reduction;
data_type dot = 0.0;
if (local_row < block_row_end)
{
const unsigned int local_first_element = row_ptr[local_row] - row_ptr[block_row_begin];
const unsigned int local_last_element = row_ptr[local_row + 1] - row_ptr[block_row_begin];
for (unsigned int local_element = local_first_element + thread_in_block;
local_element < local_last_element;
local_element += threads_for_reduction)
{
dot += cache[local_element];
}
}
__syncthreads ();
cache[i] = dot;
/// Now each row has threads_for_reduction values in cache
for (int j = threads_for_reduction / 2; j > 0; j /= 2)
{
/// Reduce for each row
__syncthreads ();
const bool use_result = thread_in_block < j && i + j < NNZ_PER_WG;
if (use_result)
dot += cache[i + j];
__syncthreads ();
if (use_result)
cache[i] = dot;
}
if (thread_in_block == 0 && local_row < block_row_end)
y[local_row] = dot;
}
else
{
/// Reduce all non zeroes of row by single thread
unsigned int local_row = block_row_begin + i;
while (local_row < block_row_end)
{
data_type dot = 0.0;
for (unsigned int j = row_ptr[local_row] - block_data_begin;
j < row_ptr[local_row + 1] - block_data_begin;
j++)
{
dot += cache[j];
}
y[local_row] = dot;
local_row += NNZ_PER_WG;
}
}
}
else
{
const unsigned int row = block_row_begin;
const unsigned int warp_id = threadIdx.x / 32;
const unsigned int lane = threadIdx.x % 32;
data_type dot = 0;
if (nnz <= 64 || NNZ_PER_WG <= 32)
{
/// CSR-Vector case
if (row < n_rows)
{
const unsigned int row_start = row_ptr[row];
const unsigned int row_end = row_ptr[row + 1];
for (unsigned int element = row_start + lane; element < row_end; element += 32)
dot += data[element] * x[col_ids[element]];
}
dot = warp_reduce (dot);
if (lane == 0 && warp_id == 0 && row < n_rows)
{
y[row] = dot;
}
}
else
{
/// CSR-VectorL case
if (row < n_rows)
{
const unsigned int row_start = row_ptr[row];
const unsigned int row_end = row_ptr[row + 1];
for (unsigned int element = row_start + threadIdx.x; element < row_end; element += blockDim.x)
dot += data[element] * x[col_ids[element]];
}
dot = warp_reduce (dot);
if (lane == 0)
cache[warp_id] = dot;
__syncthreads ();
if (warp_id == 0)
{
dot = 0.0;
for (unsigned int element = lane; element < blockDim.x / 32; element += 32)
dot += cache[element];
dot = warp_reduce (dot);
if (lane == 0 && row < n_rows)
{
y[row] = dot;
}
}
}
}
}
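// Split the rows into work-groups of roughly NNZ_PER_WG non-zeros each.
// Called twice: with fill == false it only counts the number of row blocks;
// with fill == true it writes the block delimiters into row_blocks.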
unsigned int
fill_row_blocks (
bool fill,
unsigned int rows_count,
const unsigned int *row_ptr,
unsigned int *row_blocks
)
{
if (fill)
row_blocks[0] = 0;
int last_i = 0;
int current_wg = 1;
unsigned int nnz_sum = 0;
for (int i = 1; i <= rows_count; i++)
{
nnz_sum += row_ptr[i] - row_ptr[i - 1];
if (nnz_sum == NNZ_PER_WG)
{
last_i = i;
if (fill)
row_blocks[current_wg] = i;
current_wg++;
nnz_sum = 0;
}
else if (nnz_sum > NNZ_PER_WG)
{
if (i - last_i > 1)
{
if (fill)
row_blocks[current_wg] = i - 1;
current_wg++;
i--;
}
else
{
if (fill)
row_blocks[current_wg] = i;
current_wg++;
}
last_i = i;
nnz_sum = 0;
}
else if (i - last_i > NNZ_PER_WG)
{
last_i = i;
if (fill)
row_blocks[current_wg] = i;
current_wg++;
nnz_sum = 0;
}
}
if (fill)
row_blocks[current_wg] = rows_count;
return current_wg;
}
template <typename data_type>
measurement_class gpu_csr_adaptive_spmv (
const csr_matrix_class<data_type> &matrix,
resizable_gpu_memory<data_type> &A,
resizable_gpu_memory<unsigned int> &col_ids,
resizable_gpu_memory<unsigned int> &row_ptr,
resizable_gpu_memory<data_type> &x,
resizable_gpu_memory<data_type> &y,
data_type*reusable_vector,
const data_type*reference_y)
{
auto &meta = matrix.meta;
const size_t A_size = matrix.get_matrix_size ();
const size_t col_ids_size = matrix.meta.non_zero_count;
const size_t row_ptr_size = matrix.meta.rows_count + 1;
const size_t x_size = matrix.meta.cols_count;
const size_t y_size = matrix.meta.rows_count;
A.resize (A_size);
col_ids.resize (col_ids_size);
row_ptr.resize (row_ptr_size);
x.resize (x_size);
y.resize (y_size);
hipMemcpy (A.get (), matrix.data.get (), A_size * sizeof (data_type), hipMemcpyHostToDevice);
hipMemcpy (col_ids.get (), matrix.columns.get (), col_ids_size * sizeof (unsigned int), hipMemcpyHostToDevice);
hipMemcpy (row_ptr.get (), matrix.row_ptr.get (), row_ptr_size * sizeof (unsigned int), hipMemcpyHostToDevice);
{
dim3 block_size = dim3 (512);
dim3 grid_size {};
grid_size.x = (x_size + block_size.x - 1) / block_size.x;
hipLaunchKernelGGL(( fill_vector<data_type>), dim3(grid_size), dim3(block_size), 0, 0, x_size, x.get (), 1.0);
grid_size.y = (y_size + block_size.x - 1) / block_size.x;
hipLaunchKernelGGL(( fill_vector<data_type>), dim3(grid_size), dim3(block_size), 0, 0, y_size, y.get (), 0.0);
}
// fill delimiters
const unsigned int blocks_count = fill_row_blocks (false, meta.rows_count, matrix.row_ptr.get (), nullptr);
std::unique_ptr<unsigned int[]> row_blocks(new unsigned int[blocks_count + 1]);
fill_row_blocks (true, meta.rows_count, matrix.row_ptr.get (), row_blocks.get ());
unsigned int *d_row_blocks {};
hipMalloc (&d_row_blocks, (blocks_count + 1) * sizeof (unsigned int));
hipMemcpy (d_row_blocks, row_blocks.get (), sizeof (unsigned int) * (blocks_count + 1), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipDeviceSynchronize ();
hipEventRecord (start);
{
dim3 block_size = dim3 (NNZ_PER_WG);
dim3 grid_size {};
grid_size.x = blocks_count;
hipLaunchKernelGGL(( csr_adaptive_spmv_kernel), dim3(grid_size), dim3(block_size), 0, 0,
meta.rows_count, col_ids.get (), row_ptr.get (), d_row_blocks, A.get (), x.get (), y.get ());
}
hipEventRecord (stop);
hipEventSynchronize (stop);
float milliseconds = 0;
hipEventElapsedTime (&milliseconds, start, stop);
hipMemcpy (reusable_vector, y.get (), y_size * sizeof (data_type), hipMemcpyDeviceToHost);
hipFree (d_row_blocks);
compare_results (y_size, reusable_vector, reference_y);
const double elapsed = milliseconds / 1000;
return measurement_class ("GPU CSR-Adaptive", elapsed, 0, 0);
}
#define INSTANTIATE(data_type) \
template measurement_class gpu_csr_adaptive_spmv<data_type>( \
const csr_matrix_class<data_type> &matrix, \
resizable_gpu_memory<data_type> &A, \
resizable_gpu_memory<unsigned int> &col_ids, \
resizable_gpu_memory<unsigned int> &row_ptr, \
resizable_gpu_memory<data_type> &x, resizable_gpu_memory<data_type> &y, \
data_type *reusable_vector, const data_type *reference_y);
INSTANTIATE (float)
INSTANTIATE (double)
#undef INSTANTIATE
| b1902f9436eb8ff679647ec702108e31d26df22f.cu | //
// Created by egi on 11/3/19.
//
#include "csr_adaptive_spmv.h"
#include "reduce.cuh"
#define NNZ_PER_WG 64u ///< Should be power of two
template <typename data_type>
__global__ void fill_vector (unsigned int n, data_type *vec, data_type value)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
vec[i] = value;
}
__device__ unsigned int prev_power_of_2 (unsigned int n)
{
while (n & n - 1)
n = n & n - 1;
return n;
}
template <typename data_type>
__global__ void csr_adaptive_spmv_kernel (
const unsigned int n_rows,
const unsigned int *col_ids,
const unsigned int *row_ptr,
const unsigned int *row_blocks,
const data_type *data,
const data_type *x,
data_type *y)
{
const unsigned int block_row_begin = row_blocks[blockIdx.x];
const unsigned int block_row_end = row_blocks[blockIdx.x + 1];
const unsigned int nnz = row_ptr[block_row_end] - row_ptr[block_row_begin];
__shared__ data_type cache[NNZ_PER_WG];
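  // Each thread block processes one row block: blocks spanning several rows
  // take the CSR-Stream path below; single-row blocks use CSR-Vector (one
  // warp) when the row has few non-zeros, or CSR-VectorL (whole block) otherwise.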
if (block_row_end - block_row_begin > 1)
{
/// CSR-Stream case
const unsigned int i = threadIdx.x;
const unsigned int block_data_begin = row_ptr[block_row_begin];
const unsigned int thread_data_begin = block_data_begin + i;
if (i < nnz)
cache[i] = data[thread_data_begin] * x[col_ids[thread_data_begin]];
__syncthreads ();
const unsigned int threads_for_reduction = prev_power_of_2 (blockDim.x / (block_row_end - block_row_begin));
if (threads_for_reduction > 1)
{
/// Reduce all non zeroes of row by multiple thread
const unsigned int thread_in_block = i % threads_for_reduction;
const unsigned int local_row = block_row_begin + i / threads_for_reduction;
data_type dot = 0.0;
if (local_row < block_row_end)
{
const unsigned int local_first_element = row_ptr[local_row] - row_ptr[block_row_begin];
const unsigned int local_last_element = row_ptr[local_row + 1] - row_ptr[block_row_begin];
for (unsigned int local_element = local_first_element + thread_in_block;
local_element < local_last_element;
local_element += threads_for_reduction)
{
dot += cache[local_element];
}
}
__syncthreads ();
cache[i] = dot;
/// Now each row has threads_for_reduction values in cache
for (int j = threads_for_reduction / 2; j > 0; j /= 2)
{
/// Reduce for each row
__syncthreads ();
const bool use_result = thread_in_block < j && i + j < NNZ_PER_WG;
if (use_result)
dot += cache[i + j];
__syncthreads ();
if (use_result)
cache[i] = dot;
}
if (thread_in_block == 0 && local_row < block_row_end)
y[local_row] = dot;
}
else
{
/// Reduce all non zeroes of row by single thread
unsigned int local_row = block_row_begin + i;
while (local_row < block_row_end)
{
data_type dot = 0.0;
for (unsigned int j = row_ptr[local_row] - block_data_begin;
j < row_ptr[local_row + 1] - block_data_begin;
j++)
{
dot += cache[j];
}
y[local_row] = dot;
local_row += NNZ_PER_WG;
}
}
}
else
{
const unsigned int row = block_row_begin;
const unsigned int warp_id = threadIdx.x / 32;
const unsigned int lane = threadIdx.x % 32;
data_type dot = 0;
if (nnz <= 64 || NNZ_PER_WG <= 32)
{
/// CSR-Vector case
if (row < n_rows)
{
const unsigned int row_start = row_ptr[row];
const unsigned int row_end = row_ptr[row + 1];
for (unsigned int element = row_start + lane; element < row_end; element += 32)
dot += data[element] * x[col_ids[element]];
}
dot = warp_reduce (dot);
if (lane == 0 && warp_id == 0 && row < n_rows)
{
y[row] = dot;
}
}
else
{
/// CSR-VectorL case
if (row < n_rows)
{
const unsigned int row_start = row_ptr[row];
const unsigned int row_end = row_ptr[row + 1];
for (unsigned int element = row_start + threadIdx.x; element < row_end; element += blockDim.x)
dot += data[element] * x[col_ids[element]];
}
dot = warp_reduce (dot);
if (lane == 0)
cache[warp_id] = dot;
__syncthreads ();
if (warp_id == 0)
{
dot = 0.0;
for (unsigned int element = lane; element < blockDim.x / 32; element += 32)
dot += cache[element];
dot = warp_reduce (dot);
if (lane == 0 && row < n_rows)
{
y[row] = dot;
}
}
}
}
}
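// Split the rows into work-groups of roughly NNZ_PER_WG non-zeros each.
// Called twice: with fill == false it only counts the number of row blocks;
// with fill == true it writes the block delimiters into row_blocks.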
unsigned int
fill_row_blocks (
bool fill,
unsigned int rows_count,
const unsigned int *row_ptr,
unsigned int *row_blocks
)
{
if (fill)
row_blocks[0] = 0;
int last_i = 0;
int current_wg = 1;
unsigned int nnz_sum = 0;
for (int i = 1; i <= rows_count; i++)
{
nnz_sum += row_ptr[i] - row_ptr[i - 1];
if (nnz_sum == NNZ_PER_WG)
{
last_i = i;
if (fill)
row_blocks[current_wg] = i;
current_wg++;
nnz_sum = 0;
}
else if (nnz_sum > NNZ_PER_WG)
{
if (i - last_i > 1)
{
if (fill)
row_blocks[current_wg] = i - 1;
current_wg++;
i--;
}
else
{
if (fill)
row_blocks[current_wg] = i;
current_wg++;
}
last_i = i;
nnz_sum = 0;
}
else if (i - last_i > NNZ_PER_WG)
{
last_i = i;
if (fill)
row_blocks[current_wg] = i;
current_wg++;
nnz_sum = 0;
}
}
if (fill)
row_blocks[current_wg] = rows_count;
return current_wg;
}
template <typename data_type>
measurement_class gpu_csr_adaptive_spmv (
const csr_matrix_class<data_type> &matrix,
resizable_gpu_memory<data_type> &A,
resizable_gpu_memory<unsigned int> &col_ids,
resizable_gpu_memory<unsigned int> &row_ptr,
resizable_gpu_memory<data_type> &x,
resizable_gpu_memory<data_type> &y,
data_type*reusable_vector,
const data_type*reference_y)
{
auto &meta = matrix.meta;
const size_t A_size = matrix.get_matrix_size ();
const size_t col_ids_size = matrix.meta.non_zero_count;
const size_t row_ptr_size = matrix.meta.rows_count + 1;
const size_t x_size = matrix.meta.cols_count;
const size_t y_size = matrix.meta.rows_count;
A.resize (A_size);
col_ids.resize (col_ids_size);
row_ptr.resize (row_ptr_size);
x.resize (x_size);
y.resize (y_size);
cudaMemcpy (A.get (), matrix.data.get (), A_size * sizeof (data_type), cudaMemcpyHostToDevice);
cudaMemcpy (col_ids.get (), matrix.columns.get (), col_ids_size * sizeof (unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy (row_ptr.get (), matrix.row_ptr.get (), row_ptr_size * sizeof (unsigned int), cudaMemcpyHostToDevice);
{
dim3 block_size = dim3 (512);
dim3 grid_size {};
grid_size.x = (x_size + block_size.x - 1) / block_size.x;
fill_vector<data_type><<<grid_size, block_size>>> (x_size, x.get (), 1.0);
grid_size.y = (y_size + block_size.x - 1) / block_size.x;
fill_vector<data_type><<<grid_size, block_size>>> (y_size, y.get (), 0.0);
}
// fill delimiters
const unsigned int blocks_count = fill_row_blocks (false, meta.rows_count, matrix.row_ptr.get (), nullptr);
std::unique_ptr<unsigned int[]> row_blocks(new unsigned int[blocks_count + 1]);
fill_row_blocks (true, meta.rows_count, matrix.row_ptr.get (), row_blocks.get ());
unsigned int *d_row_blocks {};
cudaMalloc (&d_row_blocks, (blocks_count + 1) * sizeof (unsigned int));
cudaMemcpy (d_row_blocks, row_blocks.get (), sizeof (unsigned int) * (blocks_count + 1), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaDeviceSynchronize ();
cudaEventRecord (start);
{
dim3 block_size = dim3 (NNZ_PER_WG);
dim3 grid_size {};
grid_size.x = blocks_count;
csr_adaptive_spmv_kernel<<<grid_size, block_size>>> (
meta.rows_count, col_ids.get (), row_ptr.get (), d_row_blocks, A.get (), x.get (), y.get ());
}
cudaEventRecord (stop);
cudaEventSynchronize (stop);
float milliseconds = 0;
cudaEventElapsedTime (&milliseconds, start, stop);
cudaMemcpy (reusable_vector, y.get (), y_size * sizeof (data_type), cudaMemcpyDeviceToHost);
cudaFree (d_row_blocks);
compare_results (y_size, reusable_vector, reference_y);
const double elapsed = milliseconds / 1000;
return measurement_class ("GPU CSR-Adaptive", elapsed, 0, 0);
}
#define INSTANTIATE(data_type) \
template measurement_class gpu_csr_adaptive_spmv<data_type>( \
const csr_matrix_class<data_type> &matrix, \
resizable_gpu_memory<data_type> &A, \
resizable_gpu_memory<unsigned int> &col_ids, \
resizable_gpu_memory<unsigned int> &row_ptr, \
resizable_gpu_memory<data_type> &x, resizable_gpu_memory<data_type> &y, \
data_type *reusable_vector, const data_type *reference_y);
INSTANTIATE (float)
INSTANTIATE (double)
#undef INSTANTIATE
|
401cd501bd557f641f38d9a1c1f2eedc7271dbb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This program uses the method of reduction to add all elements of an array
*
*/
#include <stdio.h>
// =========== GLOBALS =========================
const int N = 200; // number of elements in array
// threadsPerBlock must be a power of 2 for the halving reduction below
const int threadsPerBlock = 256;
// Calculate number of blocks needed
const int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock;
// GPU Kernel
__global__ void reduce(int *a, int *res){
// create shared memory for the threads in the block
__shared__ int cache[threadsPerBlock];
// get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// index into the cache for this block
int cacheIndex = threadIdx.x;
// set the value in cache
    cache[cacheIndex] = (tid < N) ? a[tid] : 0; // guard: blockDim.x may exceed N, pad extra threads with 0
__syncthreads(); //synchronize threads before continuing
int i = blockDim.x/2; // only want first half to do work
while( i != 0 ){
if (cacheIndex < i) // make sure we are not doing bogus add
// add the current index and ith element
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads(); // we want all threads to finish
i /= 2;
}
if (cacheIndex == 0) // only one thread needs to do this
*res = cache[0];
}
int main(void){
// initialize pointers
int *a, *res;
int *d_a, *d_res;
// allocate cpu memory
a = (int*)malloc(N*sizeof(int));
res = (int*)malloc(sizeof(int));
// allocate memory on GPU
hipMalloc((void**)&d_a, N * sizeof(int));
hipMalloc((void**)&d_res, sizeof(int));
// fill in "a" array
for (int i=0; i<N; i++){
a[i] = 2;
}
// copy from host to device
hipMemcpy(d_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_res, res, sizeof(int), hipMemcpyHostToDevice);
// run kernel
hipLaunchKernelGGL(( reduce), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_a, d_res);
// copy memory from gpu to cpu
hipMemcpy(res, d_res, sizeof(int), hipMemcpyDeviceToHost);
// print reslut
printf("Sum: %d\n", *res);
// clean up
hipFree(d_a);
hipFree(d_res);
free(a);
free(res);
}
| 401cd501bd557f641f38d9a1c1f2eedc7271dbb1.cu | /* This program uses the method of reduction to add all elements of an array
*
*/
#include <stdio.h>
// =========== GLOBALS =========================
const int N = 200; // number of elements in array
// threadsPerBlock must be a power of 2 for the halving reduction below
const int threadsPerBlock = 256;
// Calculate number of blocks needed
const int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock;
// GPU Kernel
__global__ void reduce(int *a, int *res){
// create shared memory for the threads in the block
__shared__ int cache[threadsPerBlock];
// get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// index into the cache for this block
int cacheIndex = threadIdx.x;
// set the value in cache
    cache[cacheIndex] = (tid < N) ? a[tid] : 0; // guard: blockDim.x may exceed N, pad extra threads with 0
__syncthreads(); //synchronize threads before continuing
int i = blockDim.x/2; // only want first half to do work
while( i != 0 ){
if (cacheIndex < i) // make sure we are not doing bogus add
// add the current index and ith element
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads(); // we want all threads to finish
i /= 2;
}
if (cacheIndex == 0) // only one thread needs to do this
*res = cache[0];
}
int main(void){
// initialize pointers
int *a, *res;
int *d_a, *d_res;
// allocate cpu memory
a = (int*)malloc(N*sizeof(int));
res = (int*)malloc(sizeof(int));
// allocate memory on GPU
cudaMalloc((void**)&d_a, N * sizeof(int));
cudaMalloc((void**)&d_res, sizeof(int));
// fill in "a" array
for (int i=0; i<N; i++){
a[i] = 2;
}
// copy from host to device
cudaMemcpy(d_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_res, res, sizeof(int), cudaMemcpyHostToDevice);
// run kernel
reduce<<<blocksPerGrid,threadsPerBlock>>>(d_a, d_res);
// copy memory from gpu to cpu
cudaMemcpy(res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
// print reslut
printf("Sum: %d\n", *res);
// clean up
cudaFree(d_a);
cudaFree(d_res);
free(a);
free(res);
}
|
6245edee684b68c21554cf95e487df5c0d910fc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "reducedMathPlugin.h"
#include <iostream>
using nvinfer1::plugin::ReducedDivisor;
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void priorBoxKernel(
PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const float* minSize,
const float* maxSize,
const float* aspectRatios,
float* outputData)
{
// output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4)
const int dim = H * W * numPriors;
const bool haveMaxSize = param.numMaxSize > 0;
const int dimAR = (haveMaxSize ? 1 : 0) + numAspectRatios;
for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
i < dim; i += gridDim.x * nthdsPerCTA)
{
const int w = (i / numPriors) % W;
const int h = (i / numPriors) / W;
// Usually param.offset == 0.5
        // Calculate the center of prior box at the input image scale
const float centerX = (w + param.offset) * param.stepW;
const float centerY = (h + param.offset) * param.stepH;
// Minimum size index
const int minSizeId = (i / dimAR) % param.numMinSize;
// Aspect ratio index
const int arId = i % dimAR;
        // Generate square prior box of aspect ratio of 1.0, edge length of minSize[minSizeId]
if (arId == 0)
{
const float boxW = minSize[minSizeId];
const float boxH = boxW;
float x, y, z, w;
// Calculate [x_topleft, y_topleft, x_bottomright, y_bottomright]
// Coordinates were scaled to [0, 1] against the width or height of the original input image
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
            // If clipping is enabled, make sure all the bounding boxes are inside the original input image
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
// Copy the bounding box coordinates to output
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
        // If maxSize is provided:
        // Generate square prior box for aspect ratio of 1.0, edge length of sqrt(minSize[minSizeId] * maxSize[minSizeId])
// Described in SSD paper page 6
else if (haveMaxSize && arId == 1)
{
const float boxW = sqrt(minSize[minSizeId] * maxSize[minSizeId]);
const float boxH = boxW;
float x, y, z, w;
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
        // Generate other bounding boxes with aspect ratios other than 1.0
else
{
const int arOffset = haveMaxSize ? arId - 1 : arId; // skip aspectRatios[0] which is 1
const float boxW = minSize[minSizeId] * sqrt(aspectRatios[arOffset]);
const float boxH = minSize[minSizeId] / sqrt(aspectRatios[arOffset]);
float x, y, z, w;
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
}
    // Simply copy the variance from the parameter to the output
float* output = outputData + dim * 4;
for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
i < dim; i += gridDim.x * nthdsPerCTA)
{
float x, y, z, w;
x = param.variance[0];
y = param.variance[1];
z = param.variance[2];
w = param.variance[3];
output[i * 4] = x;
output[i * 4 + 1] = y;
output[i * 4 + 2] = z;
output[i * 4 + 3] = w;
}
}
pluginStatus_t priorBoxGpu(
hipStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
const int dim = H * W * numPriors;
if (dim > 5120)
{
const int BS = 128;
const int GS = (dim + BS - 1) / BS;
hipLaunchKernelGGL(( priorBoxKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, H, W, numPriors, numAspectRatios,
(const float*) minSize, (const float*) maxSize,
(const float*) aspectRatios, (float*) outputData);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
else
{
const int BS = 32;
const int GS = (dim + BS - 1) / BS;
hipLaunchKernelGGL(( priorBoxKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, H, W, numPriors, numAspectRatios,
(const float*) minSize, (const float*) maxSize,
(const float*) aspectRatios, (float*) outputData);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
}
pluginStatus_t priorBoxInference(
hipStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
ASSERT(param.numMaxSize >= 0);
if (param.numMaxSize)
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, maxSize, aspectRatios, outputData);
else
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, nullptr, aspectRatios, outputData);
}
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t priorBoxInference(
hipStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
ASSERT(param.numMaxSize >= 0);
if (param.numMaxSize)
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, maxSize, aspectRatios, outputData);
else
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, nullptr, aspectRatios, outputData);
}
} // namespace nvinfer1
} // namespace plugin
| 6245edee684b68c21554cf95e487df5c0d910fc4.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.h"
#include "reducedMathPlugin.h"
#include <iostream>
using nvinfer1::plugin::ReducedDivisor;
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void priorBoxKernel(
PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const float* minSize,
const float* maxSize,
const float* aspectRatios,
float* outputData)
{
// output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4)
const int dim = H * W * numPriors;
const bool haveMaxSize = param.numMaxSize > 0;
const int dimAR = (haveMaxSize ? 1 : 0) + numAspectRatios;
for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
i < dim; i += gridDim.x * nthdsPerCTA)
{
const int w = (i / numPriors) % W;
const int h = (i / numPriors) / W;
// Usually param.offset == 0.5
        // Calculate the center of prior box at the input image scale
const float centerX = (w + param.offset) * param.stepW;
const float centerY = (h + param.offset) * param.stepH;
// Minimum size index
const int minSizeId = (i / dimAR) % param.numMinSize;
// Aspect ratio index
const int arId = i % dimAR;
        // Generate square prior box of aspect ratio of 1.0, edge length of minSize[minSizeId]
if (arId == 0)
{
const float boxW = minSize[minSizeId];
const float boxH = boxW;
float x, y, z, w;
// Calculate [x_topleft, y_topleft, x_bottomright, y_bottomright]
// Coordinates were scaled to [0, 1] against the width or height of the original input image
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
            // If clipping is enabled, make sure all the bounding boxes are inside the original input image
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
// Copy the bounding box coordinates to output
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
        // If maxSize is provided:
        // Generate square prior box for aspect ratio of 1.0, edge length of sqrt(minSize[minSizeId] * maxSize[minSizeId])
// Described in SSD paper page 6
else if (haveMaxSize && arId == 1)
{
const float boxW = sqrt(minSize[minSizeId] * maxSize[minSizeId]);
const float boxH = boxW;
float x, y, z, w;
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
        // Generate other bounding boxes with aspect ratios other than 1.0
else
{
const int arOffset = haveMaxSize ? arId - 1 : arId; // skip aspectRatios[0] which is 1
const float boxW = minSize[minSizeId] * sqrt(aspectRatios[arOffset]);
const float boxH = minSize[minSizeId] / sqrt(aspectRatios[arOffset]);
float x, y, z, w;
x = (centerX - boxW / 2.0f) / param.imgW;
y = (centerY - boxH / 2.0f) / param.imgH;
z = (centerX + boxW / 2.0f) / param.imgW;
w = (centerY + boxH / 2.0f) / param.imgH;
if (param.clip)
{
x = min(max(x, 0.0f), 1.0f);
y = min(max(y, 0.0f), 1.0f);
z = min(max(z, 0.0f), 1.0f);
w = min(max(w, 0.0f), 1.0f);
}
outputData[i * 4] = x;
outputData[i * 4 + 1] = y;
outputData[i * 4 + 2] = z;
outputData[i * 4 + 3] = w;
}
}
// Simply copy variance to from the parameter to output
float* output = outputData + dim * 4;
for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
i < dim; i += gridDim.x * nthdsPerCTA)
{
float x, y, z, w;
x = param.variance[0];
y = param.variance[1];
z = param.variance[2];
w = param.variance[3];
output[i * 4] = x;
output[i * 4 + 1] = y;
output[i * 4 + 2] = z;
output[i * 4 + 3] = w;
}
}
pluginStatus_t priorBoxGpu(
cudaStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
const int dim = H * W * numPriors;
if (dim > 5120)
{
const int BS = 128;
const int GS = (dim + BS - 1) / BS;
priorBoxKernel<BS><<<GS, BS, 0, stream>>>(param, H, W, numPriors, numAspectRatios,
(const float*) minSize, (const float*) maxSize,
(const float*) aspectRatios, (float*) outputData);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
else
{
const int BS = 32;
const int GS = (dim + BS - 1) / BS;
priorBoxKernel<BS><<<GS, BS, 0, stream>>>(param, H, W, numPriors, numAspectRatios,
(const float*) minSize, (const float*) maxSize,
(const float*) aspectRatios, (float*) outputData);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
}
pluginStatus_t priorBoxInference(
cudaStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
ASSERT(param.numMaxSize >= 0);
if (param.numMaxSize)
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, maxSize, aspectRatios, outputData);
else
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, nullptr, aspectRatios, outputData);
}
namespace nvinfer1
{
namespace plugin
{
pluginStatus_t priorBoxInference(
cudaStream_t stream,
const PriorBoxParameters param,
const int H,
const int W,
const int numPriors,
const int numAspectRatios,
const void* minSize,
const void* maxSize,
const void* aspectRatios,
void* outputData)
{
ASSERT(param.numMaxSize >= 0);
if (param.numMaxSize)
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, maxSize, aspectRatios, outputData);
else
return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios,
minSize, nullptr, aspectRatios, outputData);
}
} // namespace nvinfer1
} // namespace plugin
|
55f128a791faf2998bac174a229b9649d2d5f438.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "mat.cuh"
__global__ void applyRes(unsigned char *in, xwtype *res, unsigned char *recon)
{
int i = blockDim.x*blockIdx.x+threadIdx.x;
recon[i] = (int)in[i] + res[i];
}
/*
__global__ void conv2mid(convtype *conv, midtype *mid, int num)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < num)
{
mid[i] = conv[i];
i += gridDim.x*blockDim.x;
}
}
__global__ void VectorDiv(midtype *dividend, xwtype *quotient, int divisor, int n)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < n)
{
quotient[i] = dividend[i] / divisor;
i += gridDim.x*blockDim.x;
}
}
*/
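// Two-stage |max| reduction: stage 1 writes MAXGRID per-block partial maxima
// of |data| into `buffer`, stage 2 collapses those partials into buffer[0],
// which is then copied back to the host pointer `max`.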
__host__ void findMax(convtype *data, convtype *buffer, int n, convtype *max)
{
findMax_reduce1 << <MAXGRID, BLOCKSIZE, BLOCKSIZE * sizeof(convtype) >> >(data, buffer, n);
hipDeviceSynchronize();
findMax_reduce2 << <1, MAXGRID / 2, sizeof(convtype)*MAXGRID / 2 >> >(buffer);
hipDeviceSynchronize();
hipMemcpyAsync(max, buffer, sizeof(convtype), hipMemcpyDeviceToHost);
}
__global__ void findMax_reduce1(convtype *g_idata, convtype *g_odata, int n)
{
extern __shared__ convtype sdata[];//BLOCKSIZE>=blockDim.x
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + tid;
unsigned int gridSize = blockDim.x * gridDim.x;
sdata[tid] = (abs(g_idata[i])>abs(g_idata[i + gridSize])) ? abs(g_idata[i]) : abs(g_idata[i + gridSize]);
i += gridSize * 2;
while (i < n) { if (sdata[tid] < abs(g_idata[i]))sdata[tid] = abs(g_idata[i]); i += gridSize; }
__syncthreads();
if (tid < 512) { if (sdata[tid] < sdata[tid + 512]) sdata[tid] = sdata[tid + 512]; }__syncthreads();
if (tid < 256) { if (sdata[tid] < sdata[tid + 256]) sdata[tid] = sdata[tid + 256]; } __syncthreads();
if (tid < 128) { if (sdata[tid] < sdata[tid + 128]) sdata[tid] = sdata[tid + 128]; } __syncthreads();
if (tid < 64) { if (sdata[tid] < sdata[tid + 64]) sdata[tid] = sdata[tid + 64]; } __syncthreads();
if (tid < 32)
{
if (sdata[tid] < sdata[tid + 32]) sdata[tid] = sdata[tid + 32];
if (sdata[tid] < sdata[tid + 16]) sdata[tid] = sdata[tid + 16];
if (sdata[tid] < sdata[tid + 8]) sdata[tid] = sdata[tid + 8];
if (sdata[tid] < sdata[tid + 4]) sdata[tid] = sdata[tid + 4];
if (sdata[tid] < sdata[tid + 2]) sdata[tid] = sdata[tid + 2];
if (sdata[tid] < sdata[tid + 1]) sdata[tid] = sdata[tid + 1];
}
if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; }
}
__global__ void findMax_reduce2(convtype *data)//number = 1024
{
extern __shared__ convtype sdata[];
unsigned int tid = threadIdx.x;
#if MAXGRID == 2048
sdata[tid] = data[tid] > data[tid + 1024] ? data[tid] : data[tid + 1024];
#elif MAXGRID == 1024
sdata[tid] = data[tid] > data[tid + 512] ? data[tid] : data[tid + 512];
#elif MAXGRID == 512
sdata[tid] = data[tid] > data[tid + 256] ? data[tid] : data[tid + 256];
#elif MAXGRID == 256
sdata[tid] = data[tid] > data[tid + 128] ? data[tid] : data[tid + 128];
#endif
#if MAXGRID >=2048
if (tid < 512) { if (sdata[tid] < sdata[tid + 512]) sdata[tid] = sdata[tid + 512]; } __syncthreads();
#endif
#if MAXGRID >=1024
if (tid < 256) { if (sdata[tid] < sdata[tid + 256]) sdata[tid] = sdata[tid + 256]; } __syncthreads();
#endif
#if MAXGRID >=512
if (tid < 128) { if (sdata[tid] < sdata[tid + 128]) sdata[tid] = sdata[tid + 128]; } __syncthreads();
#endif
#if MAXGRID >=256
if (tid < 64) { if (sdata[tid] < sdata[tid + 64]) sdata[tid] = sdata[tid + 64]; } __syncthreads();
#endif
if (tid < 32)
{
if (sdata[tid] < sdata[tid + 32]) sdata[tid] = sdata[tid + 32];
if (sdata[tid] < sdata[tid + 16]) sdata[tid] = sdata[tid + 16];
if (sdata[tid] < sdata[tid + 8]) sdata[tid] = sdata[tid + 8];
if (sdata[tid] < sdata[tid + 4]) sdata[tid] = sdata[tid + 4];
if (sdata[tid] < sdata[tid + 2]) sdata[tid] = sdata[tid + 2];
if (sdata[tid] < sdata[tid + 1]) sdata[tid] = sdata[tid + 1];
}
if (tid == 0)data[0] = sdata[tid];
}
char* HWCN2NCHW_VECT_C_CPU(char *HWCN, int H, int W, int C, int N, int *outSize)
{
int i, j, k, m;
int HWC_O, HW, HW4, W4, C_O, c_o, cv_o;
HW = H*W;
HW4 = HW * 4;
W4 = W * 4;
C_O = ceil((float)C / 4);
HWC_O = H*W*C_O * 4;
*outSize = N*C_O*H*W * 4;
char *NCHW_VECT_C = new char[*outSize];
memset(NCHW_VECT_C, 0, *outSize);
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
{
c_o = j >> 2;
cv_o = j & 3;
for (k = 0;k < H;k++)
for (m = 0;m < W;m++)
NCHW_VECT_C[i*HWC_O+c_o*HW4+k*W4+m*4+cv_o] = HWCN[k*W*C*N + m*C*N + j*N + i];
}
return NCHW_VECT_C;
}
char* NCHW2NCHW_VECT_C_CPU(char *NCHW, int N, int C, int H, int W, int *outSize)
{
int i, j, k, m;
int CHW_VECT, CHW, HW, HW4, W4, C_O, c_o, cv_o;
CHW = C*H*W;
HW = H*W;
C_O = ceil((float)C / 4)*4;
*outSize = N*C_O*H*W;
CHW_VECT = C_O*H*W;
HW4 = HW * 4;
W4 = W * 4;
char *NCHW_VECT_C = new char[*outSize];
memset(NCHW_VECT_C, 0, *outSize);
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
{
c_o = j >> 2;
cv_o = j & 3;
for (k = 0;k < H;k++)
for (m = 0;m < W;m++)
NCHW_VECT_C[i*CHW_VECT + c_o*HW4 + k*W4 + m * 4 + cv_o] = NCHW[i*CHW + j*HW + k*W + m];
}
return NCHW_VECT_C;
}
char* NCWH2HWCN_CPU(char *NCWH, int H, int W, int C, int N, int *outSize)
{
int i, j, k, l;
int WCN, CN, CWH, WH;
WCN = W*C*N;
CN = C*N;
CWH = C*W*H;
WH = W*H;
*outSize = N*C*W*H;
char *HWCN = new char[*outSize];
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
for (k = 0;k < W;k++)
for (l = 0;l < H;l++)
HWCN[l*WCN + k*CN + j*N + i] = NCWH[i*CWH + j*WH + k*H + l];
return HWCN;
}
xwtype* HWCN2NCHW_CPU(xwtype*HWCN, int H, int W, int C, int N, int *outSize)
{
int i, j, k, l;
int WCN, CN, CHW, HW;
WCN = W*C*N;
CN = C*N;
CHW = C*H*W;
HW = H*W;
*outSize = N*C*H*W;
xwtype *NCHW = new xwtype[*outSize];
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (l = 0;l < N;l++)
NCHW[l*CHW + k*HW + i*W + j] = HWCN[i*WCN + j*CN + k*N + l];
return NCHW;
}
xwtype* HWCN2NHWC4_CPU(xwtype*HWCN, int H, int W, int C, int N, int *outSize)
{
//output numbers and channels must be multiple of 4 in inference, only channels are ensured here though.
int i, j, k, l;
int C_O = ceil((float)C / 4) * 4;
int WCN, CN, HWC_O, WC_O;
WCN = W*C*N;
CN = C*N;
HWC_O = H*W*C_O;
WC_O = W*C_O;
*outSize = N*H*W*C_O;
xwtype *NHWC4 = new xwtype[*outSize];
memset(NHWC4, 0, *outSize);
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (l = 0;l < N;l++)
NHWC4[l*HWC_O + i*WC_O + j*C_O + k] = HWCN[i*WCN + j*CN + k*N + l];
return NHWC4;
}
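// Repack a single CHW frame into CHW_VECT_C layout (channels interleaved in
// groups of 4, as in cuDNN's NCHW_VECT_C int8 format), dividing each value by
// `divisor` with round-half-away-from-zero and saturating to [-128, 127].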
__global__ void CHW2CHW_VECT_C(convtype *dividend, xwtype *quotient, int divisor, int channelSize, int channel, int gridSize)//
{
int tid;
int i, addr;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
i = 0;
addr = tid;
while (i<channel)
{
while (addr < channelSize)
{
temp = dividend[addr];
if (temp < 0)
{
temp = (temp - (divisor >> 1)) / divisor;
if (temp < -128)
quotient[addr * 4 + (i & 3)] = -128;
else
quotient[addr * 4 + (i & 3)] = temp;
}
else
{
temp = (temp + (divisor >> 1)) / divisor;
if (temp > 127)
quotient[addr * 4 + (i & 3)] = 127;
else
quotient[addr * 4 + (i & 3)] = temp;
}
addr += gridSize;
}
i++;
dividend += channelSize;
addr = tid;
if ((i & 3) == 0)
{
quotient += channelSize * 4;
}
}
}
int NCHW2NCHW_VECT_C(convtype *dividend, xwtype *quotient, int divisor, int N, int C, int H, int W)
{
int i, j, c;
int frameSize = C*H*W;
int channelSize = H*W;
for (i = 0;i < N;i++)
{
CHW2CHW_VECT_C << <GRIDSIZE, BLOCKSIZE >> >(dividend + i*frameSize, quotient + i*frameSize, divisor, channelSize, C, GRIDSIZE*BLOCKSIZE);
}
return 0;
}
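// Fixed-point rescale: output is approximately round(input * multiplier / 2^shifts).
// `bias` pre-compensates the final right shift so it rounds roughly to nearest
// instead of truncating.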
__global__ void mul_shift(convtype *input, xwtype *output, int inSize, int multiplier, int shifts)
{
int tid,bias,gridSize;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts-1) / multiplier;
gridSize = gridDim.x*blockDim.x;
while (tid < inSize)
{
temp = input[tid];
output[tid] = (((int)temp + bias) * multiplier) >> shifts;
tid += gridSize;
}
}
__global__ void mul_shift_inplace(convtype *input, int inSize, int multiplier, int shifts)
{
int tid, bias, gridSize;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts - 1) / multiplier;
gridSize = gridDim.x*blockDim.x;
while (tid < inSize)
{
temp = input[tid];
if(temp>0)
input[tid] = (((int)temp + bias) * multiplier) >> shifts;
else
input[tid] = (((int)temp - bias) * multiplier) >> shifts;
tid += gridSize;
}
}
__global__ void CHW2CHW_VECT_C_QUANT_BLU(convtype *input, xwtype *output, int channelSize, int channel, int gridSize, int blu, int multiplier, int shifts)
{
int tid;
int i, addr, bias,temp1;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts-1) / multiplier;
i = 0;
addr = tid;
while (i<channel)
{
while (addr < channelSize)
{
temp = input[addr];
/*
temp = ((temp + bias) * multiplier) >> shifts;
if(temp>THRESHOLD)
output[addr * 4 + (i & 3)] = THRESHOLD;
else if(temp<0)
output[addr * 4 + (i & 3)] = 0;
else
output[addr * 4 + (i & 3)] = temp;
*/
//temp1 = (((int)temp + bias) * multiplier) >> shifts;
if (temp > blu)
{
input[addr] = blu;
output[addr * 4 + (i & 3)] = THRESHOLD;
}
else if (temp < 0)
{
input[addr] = 0;
output[addr * 4 + (i & 3)] = 0;
}
else
output[addr * 4 + (i & 3)] = (((int)temp + bias) * multiplier) >> shifts;
addr += gridSize;
}
i++;
input += channelSize;
addr = tid;
if ((i & 3) == 0)
{
output += channelSize * 4;
}
}
}
int NCHW2NCHW_VECT_C_QUANT_BLU(convtype *before, xwtype *after, int N, int C, int H, int W, int blu, int multiplier, int shifts)
{
int i, j, c;
int frameSize = C*H*W;
int channelSize = H*W;
for (i = 0;i < N;i++)
{
CHW2CHW_VECT_C_QUANT_BLU << <GRIDSIZE, BLOCKSIZE >> > (before + i*frameSize, after + i*frameSize, channelSize, C, GRIDSIZE*BLOCKSIZE, blu, multiplier, shifts);
}
return 0;
}
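//Note on the helper below: despite its name, mse() accumulates the absolute
//difference of every element that differs from the reference dump read from fn
//(printing the index of each mismatch) and divides by the total element count,
//i.e. it reports a mean absolute error used as a debug check rather than a true
//mean squared error.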
double mse(float*x, int size, int gpu, const char*fn, int offset)
{
float*buffer, *input;
int i;
double mse;
FILE*fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("%s\nopen feature file failed\n", fn);
exit(1);
}
buffer = new float[size];
fseek(fp, offset, SEEK_SET);
fread(buffer, sizeof(float), size, fp);
fclose(fp);
if (gpu)
{
input = new float[size];
hipDeviceSynchronize();
hipMemcpy(input, x, sizeof(float)*size, hipMemcpyDeviceToHost);
}
else
input = x;
mse = 0;
for (i = 0;i < size;i++)
if (buffer[i] != input[i])
{
printf("diff at %d\n", i);
mse += fabs(buffer[i] - input[i]);
}
mse /= size;
delete[] buffer;
if (gpu)
delete[] input;
printf("mse=%f\n", mse);
return mse;
}
int load_tensor(float*x, int size, const char*fn, int offset)
{
float*x_h;
FILE*fp;
int error_code;
if (fopen_s(&fp, fn, "rb"))
{
printf("failed to open %s\n", fn);
exit(1);
}
x_h = new float[size];
fseek(fp, offset, SEEK_SET);
fread(x_h, sizeof(float), size, fp);
fclose(fp);
error_code = hipMemcpy(x, x_h, sizeof(float)*size, hipMemcpyHostToDevice);
delete[] x_h;
return error_code;
}
| 55f128a791faf2998bac174a229b9649d2d5f438.cu | #include <iostream>
#include "mat.cuh"
__global__ void applyRes(unsigned char *in, xwtype *res, unsigned char *recon)
{
int i = blockDim.x*blockIdx.x+threadIdx.x;
recon[i] = (int)in[i] + res[i];
}
/*
__global__ void conv2mid(convtype *conv, midtype *mid, int num)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < num)
{
mid[i] = conv[i];
i += gridDim.x*blockDim.x;
}
}
__global__ void VectorDiv(midtype *dividend, xwtype *quotient, int divisor, int n)
{
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < n)
{
quotient[i] = dividend[i] / divisor;
i += gridDim.x*blockDim.x;
}
}
*/
__host__ void findMax(convtype *data, convtype *buffer, int n, convtype *max)
{
findMax_reduce1 << <MAXGRID, BLOCKSIZE, BLOCKSIZE * sizeof(convtype) >> >(data, buffer, n);
cudaDeviceSynchronize();
findMax_reduce2 << <1, MAXGRID / 2, sizeof(convtype)*MAXGRID / 2 >> >(buffer);
cudaDeviceSynchronize();
cudaMemcpyAsync(max, buffer, sizeof(convtype), cudaMemcpyDeviceToHost);
}
__global__ void findMax_reduce1(convtype *g_idata, convtype *g_odata, int n)
{
extern __shared__ convtype sdata[];//BLOCKSIZE>=blockDim.x
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + tid;
unsigned int gridSize = blockDim.x * gridDim.x;
sdata[tid] = (abs(g_idata[i])>abs(g_idata[i + gridSize])) ? abs(g_idata[i]) : abs(g_idata[i + gridSize]);
i += gridSize * 2;
while (i < n) { if (sdata[tid] < abs(g_idata[i]))sdata[tid] = abs(g_idata[i]); i += gridSize; }
__syncthreads();
if (tid < 512) { if (sdata[tid] < sdata[tid + 512]) sdata[tid] = sdata[tid + 512]; }__syncthreads();
if (tid < 256) { if (sdata[tid] < sdata[tid + 256]) sdata[tid] = sdata[tid + 256]; } __syncthreads();
if (tid < 128) { if (sdata[tid] < sdata[tid + 128]) sdata[tid] = sdata[tid + 128]; } __syncthreads();
if (tid < 64) { if (sdata[tid] < sdata[tid + 64]) sdata[tid] = sdata[tid + 64]; } __syncthreads();
if (tid < 32)
{
if (sdata[tid] < sdata[tid + 32]) sdata[tid] = sdata[tid + 32];
if (sdata[tid] < sdata[tid + 16]) sdata[tid] = sdata[tid + 16];
if (sdata[tid] < sdata[tid + 8]) sdata[tid] = sdata[tid + 8];
if (sdata[tid] < sdata[tid + 4]) sdata[tid] = sdata[tid + 4];
if (sdata[tid] < sdata[tid + 2]) sdata[tid] = sdata[tid + 2];
if (sdata[tid] < sdata[tid + 1]) sdata[tid] = sdata[tid + 1];
}
if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; }
}
__global__ void findMax_reduce2(convtype *data)//number = 1024
{
extern __shared__ convtype sdata[];
unsigned int tid = threadIdx.x;
#if MAXGRID == 2048
sdata[tid] = data[tid] > data[tid + 1024] ? data[tid] : data[tid + 1024];
#elif MAXGRID == 1024
sdata[tid] = data[tid] > data[tid + 512] ? data[tid] : data[tid + 512];
#elif MAXGRID == 512
sdata[tid] = data[tid] > data[tid + 256] ? data[tid] : data[tid + 256];
#elif MAXGRID == 256
sdata[tid] = data[tid] > data[tid + 128] ? data[tid] : data[tid + 128];
#endif
#if MAXGRID >=2048
if (tid < 512) { if (sdata[tid] < sdata[tid + 512]) sdata[tid] = sdata[tid + 512]; } __syncthreads();
#endif
#if MAXGRID >=1024
if (tid < 256) { if (sdata[tid] < sdata[tid + 256]) sdata[tid] = sdata[tid + 256]; } __syncthreads();
#endif
#if MAXGRID >=512
if (tid < 128) { if (sdata[tid] < sdata[tid + 128]) sdata[tid] = sdata[tid + 128]; } __syncthreads();
#endif
#if MAXGRID >=256
if (tid < 64) { if (sdata[tid] < sdata[tid + 64]) sdata[tid] = sdata[tid + 64]; } __syncthreads();
#endif
if (tid < 32)
{
if (sdata[tid] < sdata[tid + 32]) sdata[tid] = sdata[tid + 32];
if (sdata[tid] < sdata[tid + 16]) sdata[tid] = sdata[tid + 16];
if (sdata[tid] < sdata[tid + 8]) sdata[tid] = sdata[tid + 8];
if (sdata[tid] < sdata[tid + 4]) sdata[tid] = sdata[tid + 4];
if (sdata[tid] < sdata[tid + 2]) sdata[tid] = sdata[tid + 2];
if (sdata[tid] < sdata[tid + 1]) sdata[tid] = sdata[tid + 1];
}
if (tid == 0)data[0] = sdata[tid];
}
char* HWCN2NCHW_VECT_C_CPU(char *HWCN, int H, int W, int C, int N, int *outSize)
{
int i, j, k, m;
int HWC_O, HW, HW4, W4, C_O, c_o, cv_o;
HW = H*W;
HW4 = HW * 4;
W4 = W * 4;
C_O = ceil((float)C / 4);
HWC_O = H*W*C_O * 4;
*outSize = N*C_O*H*W * 4;
char *NCHW_VECT_C = new char[*outSize];
memset(NCHW_VECT_C, 0, *outSize);
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
{
c_o = j >> 2;
cv_o = j & 3;
for (k = 0;k < H;k++)
for (m = 0;m < W;m++)
NCHW_VECT_C[i*HWC_O+c_o*HW4+k*W4+m*4+cv_o] = HWCN[k*W*C*N + m*C*N + j*N + i];
}
return NCHW_VECT_C;
}
char* NCHW2NCHW_VECT_C_CPU(char *NCHW, int N, int C, int H, int W, int *outSize)
{
int i, j, k, m;
int CHW_VECT, CHW, HW, HW4, W4, C_O, c_o, cv_o;
CHW = C*H*W;
HW = H*W;
C_O = ceil((float)C / 4)*4;
*outSize = N*C_O*H*W;
CHW_VECT = C_O*H*W;
HW4 = HW * 4;
W4 = W * 4;
char *NCHW_VECT_C = new char[*outSize];
memset(NCHW_VECT_C, 0, *outSize);
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
{
c_o = j >> 2;
cv_o = j & 3;
for (k = 0;k < H;k++)
for (m = 0;m < W;m++)
NCHW_VECT_C[i*CHW_VECT + c_o*HW4 + k*W4 + m * 4 + cv_o] = NCHW[i*CHW + j*HW + k*W + m];
}
return NCHW_VECT_C;
}
char* NCWH2HWCN_CPU(char *NCWH, int H, int W, int C, int N, int *outSize)
{
int i, j, k, l;
int WCN, CN, CWH, WH;
WCN = W*C*N;
CN = C*N;
CWH = C*W*H;
WH = W*H;
*outSize = N*C*W*H;
char *HWCN = new char[*outSize];
for (i = 0;i < N;i++)
for (j = 0;j < C;j++)
for (k = 0;k < W;k++)
for (l = 0;l < H;l++)
HWCN[l*WCN + k*CN + j*N + i] = NCWH[i*CWH + j*WH + k*H + l];
return HWCN;
}
xwtype* HWCN2NCHW_CPU(xwtype*HWCN, int H, int W, int C, int N, int *outSize)
{
int i, j, k, l;
int WCN, CN, CHW, HW;
WCN = W*C*N;
CN = C*N;
CHW = C*H*W;
HW = H*W;
*outSize = N*C*H*W;
xwtype *NCHW = new xwtype[*outSize];
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (l = 0;l < N;l++)
NCHW[l*CHW + k*HW + i*W + j] = HWCN[i*WCN + j*CN + k*N + l];
return NCHW;
}
xwtype* HWCN2NHWC4_CPU(xwtype*HWCN, int H, int W, int C, int N, int *outSize)
{
//in inference both the number of output filters and the number of channels must be multiples of 4; only the channel count is padded to a multiple of 4 here.
int i, j, k, l;
int C_O = ceil((float)C / 4) * 4;
int WCN, CN, HWC_O, WC_O;
WCN = W*C*N;
CN = C*N;
HWC_O = H*W*C_O;
WC_O = W*C_O;
*outSize = N*H*W*C_O;
xwtype *NHWC4 = new xwtype[*outSize];
memset(NHWC4, 0, *outSize);
for (i = 0;i < H;i++)
for (j = 0;j < W;j++)
for (k = 0;k < C;k++)
for (l = 0;l < N;l++)
NHWC4[l*HWC_O + i*WC_O + j*C_O + k] = HWCN[i*WCN + j*CN + k*N + l];
return NHWC4;
}
__global__ void CHW2CHW_VECT_C(convtype *dividend, xwtype *quotient, int divisor, int channelSize, int channel, int gridSize)//the intended use should be stated explicitly, in particular how positive and negative values are handled
{
int tid;
int i, addr;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
i = 0;
addr = tid;
while (i<channel)
{
while (addr < channelSize)
{
temp = dividend[addr];
if (temp < 0)
{
temp = (temp - (divisor >> 1)) / divisor;
if (temp < -128)
quotient[addr * 4 + (i & 3)] = -128;
else
quotient[addr * 4 + (i & 3)] = temp;
}
else
{
temp = (temp + (divisor >> 1)) / divisor;
if (temp > 127)
quotient[addr * 4 + (i & 3)] = 127;
else
quotient[addr * 4 + (i & 3)] = temp;
}
addr += gridSize;
}
i++;
dividend += channelSize;
addr = tid;
if ((i & 3) == 0)
{
quotient += channelSize * 4;
}
}
}
int NCHW2NCHW_VECT_C(convtype *dividend, xwtype *quotient, int divisor, int N, int C, int H, int W)
{
int i, j, c;
int frameSize = C*H*W;
int channelSize = H*W;
for (i = 0;i < N;i++)
{
CHW2CHW_VECT_C << <GRIDSIZE, BLOCKSIZE >> >(dividend + i*frameSize, quotient + i*frameSize, divisor, channelSize, C, GRIDSIZE*BLOCKSIZE);
}
return 0;
}
__global__ void mul_shift(convtype *input, xwtype *output, int inSize, int multiplier, int shifts)
{
int tid,bias,gridSize;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts-1) / multiplier;
gridSize = gridDim.x*blockDim.x;
while (tid < inSize)
{
temp = input[tid];
output[tid] = (((int)temp + bias) * multiplier) >> shifts;
tid += gridSize;
}
}
__global__ void mul_shift_inplace(convtype *input, int inSize, int multiplier, int shifts)
{
int tid, bias, gridSize;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts - 1) / multiplier;
gridSize = gridDim.x*blockDim.x;
while (tid < inSize)
{
temp = input[tid];
if(temp>0)
input[tid] = (((int)temp + bias) * multiplier) >> shifts;
else
input[tid] = (((int)temp - bias) * multiplier) >> shifts;
tid += gridSize;
}
}
__global__ void CHW2CHW_VECT_C_QUANT_BLU(convtype *input, xwtype *output, int channelSize, int channel, int gridSize, int blu, int multiplier, int shifts)
{
int tid;
int i, addr, bias,temp1;
convtype temp;
tid = blockIdx.x*blockDim.x + threadIdx.x;
bias = (1 << shifts-1) / multiplier;
i = 0;
addr = tid;
while (i<channel)
{
while (addr < channelSize)
{
temp = input[addr];
/*
temp = ((temp + bias) * multiplier) >> shifts;
if(temp>THRESHOLD)
output[addr * 4 + (i & 3)] = THRESHOLD;
else if(temp<0)
output[addr * 4 + (i & 3)] = 0;
else
output[addr * 4 + (i & 3)] = temp;
*/
//temp1 = (((int)temp + bias) * multiplier) >> shifts;
if (temp > blu)
{
input[addr] = blu;
output[addr * 4 + (i & 3)] = THRESHOLD;
}
else if (temp < 0)
{
input[addr] = 0;
output[addr * 4 + (i & 3)] = 0;
}
else
output[addr * 4 + (i & 3)] = (((int)temp + bias) * multiplier) >> shifts;
addr += gridSize;
}
i++;
input += channelSize;
addr = tid;
if ((i & 3) == 0)
{
output += channelSize * 4;
}
}
}
int NCHW2NCHW_VECT_C_QUANT_BLU(convtype *before, xwtype *after, int N, int C, int H, int W, int blu, int multiplier, int shifts)
{
int i, j, c;
int frameSize = C*H*W;
int channelSize = H*W;
for (i = 0;i < N;i++)
{
CHW2CHW_VECT_C_QUANT_BLU << <GRIDSIZE, BLOCKSIZE >> > (before + i*frameSize, after + i*frameSize, channelSize, C, GRIDSIZE*BLOCKSIZE, blu, multiplier, shifts);
}
return 0;
}
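//Note on the helper below: despite its name, mse() accumulates the absolute
//difference of every element that differs from the reference dump read from fn
//(printing the index of each mismatch) and divides by the total element count,
//i.e. it reports a mean absolute error used as a debug check rather than a true
//mean squared error.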
double mse(float*x, int size, int gpu, const char*fn, int offset)
{
float*buffer, *input;
int i;
double mse;
FILE*fp;
if (fopen_s(&fp, fn, "rb"))
{
printf("%s\nopen feature file failed\n", fn);
exit(1);
}
buffer = new float[size];
fseek(fp, offset, SEEK_SET);
fread(buffer, sizeof(float), size, fp);
fclose(fp);
if (gpu)
{
input = new float[size];
cudaDeviceSynchronize();
cudaMemcpy(input, x, sizeof(float)*size, cudaMemcpyDeviceToHost);
}
else
input = x;
mse = 0;
for (i = 0;i < size;i++)
if (buffer[i] != input[i])
{
printf("diff at %d\n", i);
mse += fabs(buffer[i] - input[i]);
}
mse /= size;
delete[] buffer;
if (gpu)
delete[] input;
printf("mse=%f\n", mse);
return mse;
}
int load_tensor(float*x, int size, const char*fn, int offset)
{
float*x_h;
FILE*fp;
int error_code;
if (fopen_s(&fp, fn, "rb"))
{
printf("failed to open %s\n", fn);
exit(1);
}
x_h = new float[size];
fseek(fp, offset, SEEK_SET);
fread(x_h, sizeof(float), size, fp);
fclose(fp);
error_code = cudaMemcpy(x, x_h, sizeof(float)*size, cudaMemcpyHostToDevice);
delete[] x_h;
return error_code;
}
|
7b9f1016a071c49cc74bbaccc9d6bc29895e320c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// add the free array element-wise to this sample's row of inputX
const double *inArrayBody = &inputX[ix* *length];
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
} | 7b9f1016a071c49cc74bbaccc9d6bc29895e320c.cu | #include "includes.h"
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// add the free array element-wise to this sample's row of inputX
const double *inArrayBody = &inputX[ix* *length];
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
} |
3e21b821c61c3e601e1d6630c70d041d7c6f1336.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file NeighborListGPU.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPU_hip.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#pragma GCC diagnostic pop
namespace hoomd
{
namespace md
{
namespace kernel
{
/*! \param d_result Device pointer to a single uint. Will be set to \a checkn if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param nwork Number of particles this GPU processes
\param box Box dimensions
\param d_rcut_max The maximum rcut(i,j) that any particle of type i participates in
\param r_buff The buffer size that particles can move in
\param ntypes The number of particle types
\param lambda_min Minimum contraction of deformation tensor
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's
current position is compared to its last position. If the particle has moved a distance more than
the buffer width, then *d_result is set to \a checkn.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int nwork,
const BoxDim box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const unsigned int offset)
{
// each thread will compare vs it's old position to see if the list needs updating
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nwork)
{
// get particle index
idx += offset;
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
const unsigned int cur_type = __scalar_as_int(cur_postype.w);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda * last_pos;
dx = box.minImage(dx);
const Scalar rmin = __ldg(d_rcut_max + cur_type);
const Scalar rmax = rmin + r_buff;
const Scalar delta_max = (rmax * lambda_min - rmin) / Scalar(2.0);
Scalar maxshiftsq = (delta_max > 0) ? delta_max * delta_max : 0.0f;
if (dot(dx, dx) >= maxshiftsq)
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(d_result, checkn);
#else
atomicMax(d_result, checkn);
#endif
}
}
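// In other words, writing r_max = r_cut_max + r_buff, the kernel above flags a
// rebuild as soon as
//     dot(dx, dx) >= ((r_max * lambda_min - r_cut_max) / 2)^2,
// which for an undeformed box (lambda_min == 1) reduces to the familiar
// half-buffer criterion |dx| >= r_buff / 2.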
hipError_t gpu_nlist_needs_update_check_new(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const GPUPartition& gpu_partition)
{
unsigned int block_size = 128;
// iterate over active GPUs in reverse order
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
int n_blocks = nwork / block_size + 1;
hipLaunchKernelGGL((gpu_nlist_needs_update_check_new_kernel),
dim3(n_blocks),
dim3(block_size),
0,
0,
d_result,
d_last_pos,
d_pos,
nwork,
box,
d_rcut_max,
r_buff,
ntypes,
lambda_min,
lambda,
checkn,
range.first);
}
return hipSuccess;
}
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param d_head_list Index of the first neighbor list entry for each particle
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that
are excluded. To allow for an arbitrary large number of exclusions, these are processed in batch
sizes of FILTER_BATCH_SIZE. The kernel must be called multiple times in order to fully remove all
exclusions from the nlist.
\note The driver gpu_nlist_filter properly makes as many calls as are necessary, it only needs
to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in
for that particle (or the thread returns if there are no exclusions past that point). The thread
then loops over the neighbor list, comparing each entry to the list of exclusions. If the entry
is not excluded, it is written back out. \a d_n_neigh is updated to reflect the current number of
particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const size_t* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into
// registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
const size_t my_head = d_head_list[idx];
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[my_head + cur_neigh_idx];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[my_head + new_n_neigh] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
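// Worked example of the batching performed by the driver below: with
// FILTER_BATCH_SIZE == 4 and at most 10 exclusions per particle
// (exli.getH() == 10), gpu_nlist_filter() launches this kernel three times with
// ex_start = 0, 4 and 8; a particle with fewer exclusions pads its local batch
// with 0xffffffff, which never matches a real neighbor index.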
hipError_t gpu_nlist_filter(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const size_t* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_filter_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
// determine parameters for kernel launch
int n_blocks = N / run_block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH()) / double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
hipLaunchKernelGGL((gpu_nlist_filter_kernel),
dim3(n_blocks),
dim3(run_block_size),
0,
0,
d_n_neigh,
d_nlist,
d_head_list,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return hipSuccess;
}
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int* tags,
const unsigned int* rtags,
const unsigned int* n_ex_tag,
const unsigned int* ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int* n_ex_idx,
unsigned int* ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
hipError_t gpu_update_exclusion_list(const unsigned int* d_tag,
const unsigned int* d_rtag,
const unsigned int* d_n_ex_tag,
const unsigned int* d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int* d_n_ex_idx,
unsigned int* d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 256;
hipLaunchKernelGGL((gpu_update_exclusion_list_kernel),
dim3(N / block_size + 1),
dim3(block_size),
0,
0,
d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return hipSuccess;
}
//! GPU kernel to do a preliminary sizing on particles
/*!
* \param d_head_list The head list of indexes to overwrite
* \param d_req_size_nlist Flag for the required size of the neighbor list to overwrite
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
*
* This kernel initializes the head list with the number of neighbors that each type expects from
* d_Nmax. A prefix sum is then performed in gpu_nlist_build_head_list() to accumulate starting
* indices.
*/
__global__ void gpu_nlist_init_head_list_kernel(size_t* d_head_list,
size_t* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes)
{
// particle index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const Scalar4 postype_i = d_pos[idx];
const unsigned int type_i = __scalar_as_int(postype_i.w);
const unsigned int Nmax_i = __ldg(d_Nmax + type_i);
d_head_list[idx] = Nmax_i;
// last thread presets its number of particles in the memory req as well
if (idx == (N - 1))
{
*d_req_size_nlist = Nmax_i;
}
}
/*!
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_head_list The complete particle head list
* \param N the number of particles on this rank
*
* A single thread on the device is needed to complete the exclusive scan and find the size of the
* neighbor list. Because gpu_nlist_init_head_list_kernel() already set the number of neighbors for
* the last particle in d_req_size_nlist, the head index of the last particle is added to this
* number to get the total size.
*/
__global__ void gpu_nlist_get_nlist_size_kernel(size_t* d_req_size_nlist,
const size_t* d_head_list,
const unsigned int N)
{
*d_req_size_nlist += d_head_list[N - 1];
}
/*!
* \param d_head_list The head list of indexes to compute for reading the neighbor list
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
* \param block_size Number of threads per block for gpu_nlist_init_head_list_kernel()
*
* \return hipSuccess on completion
*
* \b Implementation
* \a d_head_list is filled with the number of neighbors per particle. An exclusive prefix sum is
* performed in place on \a d_head_list using the thrust libraries and a single thread is used to
* perform compute the total size of the neighbor list while still on device.
*/
hipError_t gpu_nlist_build_head_list(size_t* d_head_list,
size_t* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_init_head_list_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
// initialize each particle with its number of neighbors
hipLaunchKernelGGL((gpu_nlist_init_head_list_kernel),
dim3(N / run_block_size + 1),
dim3(run_block_size),
0,
0,
d_head_list,
d_req_size_nlist,
d_Nmax,
d_pos,
N,
ntypes);
thrust::device_ptr<size_t> t_head_list = thrust::device_pointer_cast(d_head_list);
thrust::exclusive_scan(t_head_list, t_head_list + N, t_head_list);
hipLaunchKernelGGL((gpu_nlist_get_nlist_size_kernel),
dim3(1),
dim3(1),
0,
0,
d_req_size_nlist,
d_head_list,
N);
return hipSuccess;
}
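// Worked example of gpu_nlist_build_head_list() above: for three particles whose
// types allow Nmax = {8, 8, 16} neighbors, the init kernel first writes
// d_head_list = {8, 8, 16} and sets *d_req_size_nlist = 16 (the last particle's
// Nmax); the exclusive scan turns the head list into the offsets {0, 8, 16}, and
// the final single-thread kernel adds d_head_list[N-1] = 16, giving a required
// neighbor list size of 32 entries.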
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
| 3e21b821c61c3e601e1d6630c70d041d7c6f1336.cu | // Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
/*! \file NeighborListGPU.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPU.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#pragma GCC diagnostic pop
namespace hoomd
{
namespace md
{
namespace kernel
{
/*! \param d_result Device pointer to a single uint. Will be set to \a checkn if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param nwork Number of particles this GPU processes
\param box Box dimensions
\param d_rcut_max The maximum rcut(i,j) that any particle of type i participates in
\param r_buff The buffer size that particles can move in
\param ntypes The number of particle types
\param lambda_min Minimum contraction of deformation tensor
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's
current position is compared to its last position. If the particle has moved a distance more than
the buffer width, then *d_result is set to \a checkn.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int nwork,
const BoxDim box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const unsigned int offset)
{
// each thread will compare vs it's old position to see if the list needs updating
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nwork)
{
// get particle index
idx += offset;
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
const unsigned int cur_type = __scalar_as_int(cur_postype.w);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda * last_pos;
dx = box.minImage(dx);
const Scalar rmin = __ldg(d_rcut_max + cur_type);
const Scalar rmax = rmin + r_buff;
const Scalar delta_max = (rmax * lambda_min - rmin) / Scalar(2.0);
Scalar maxshiftsq = (delta_max > 0) ? delta_max * delta_max : 0.0f;
if (dot(dx, dx) >= maxshiftsq)
#if (__CUDA_ARCH__ >= 600)
atomicMax_system(d_result, checkn);
#else
atomicMax(d_result, checkn);
#endif
}
}
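// In other words, writing r_max = r_cut_max + r_buff, the kernel above flags a
// rebuild as soon as
//     dot(dx, dx) >= ((r_max * lambda_min - r_cut_max) / 2)^2,
// which for an undeformed box (lambda_min == 1) reduces to the familiar
// half-buffer criterion |dx| >= r_buff / 2.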
hipError_t gpu_nlist_needs_update_check_new(unsigned int* d_result,
const Scalar4* d_last_pos,
const Scalar4* d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar* d_rcut_max,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar lambda_min,
const Scalar3 lambda,
const unsigned int checkn,
const GPUPartition& gpu_partition)
{
unsigned int block_size = 128;
// iterate over active GPUs in reverse order
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
int n_blocks = nwork / block_size + 1;
hipLaunchKernelGGL((gpu_nlist_needs_update_check_new_kernel),
dim3(n_blocks),
dim3(block_size),
0,
0,
d_result,
d_last_pos,
d_pos,
nwork,
box,
d_rcut_max,
r_buff,
ntypes,
lambda_min,
lambda,
checkn,
range.first);
}
return hipSuccess;
}
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param d_head_list Index of the first neighbor list entry for each particle
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that
are excluded. To allow for an arbitrary large number of exclusions, these are processed in batch
sizes of FILTER_BATCH_SIZE. The kernel must be called multiple times in order to fully remove all
exclusions from the nlist.
\note The driver gpu_nlist_filter properly makes as many calls as are necessary, it only needs
to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in
for that particle (or the thread returns if there are no exclusions past that point). The thread
then loops over the neighbor list, comparing each entry to the list of exclusions. If the entry
is not excluded, it is written back out. \a d_n_neigh is updated to reflect the current number of
particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const size_t* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into
// registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
const size_t my_head = d_head_list[idx];
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[my_head + cur_neigh_idx];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[my_head + new_n_neigh] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
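// Worked example of the batching performed by the driver below: with
// FILTER_BATCH_SIZE == 4 and at most 10 exclusions per particle
// (exli.getH() == 10), gpu_nlist_filter() launches this kernel three times with
// ex_start = 0, 4 and 8; a particle with fewer exclusions pads its local batch
// with 0xffffffff, which never matches a real neighbor index.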
hipError_t gpu_nlist_filter(unsigned int* d_n_neigh,
unsigned int* d_nlist,
const size_t* d_head_list,
const unsigned int* d_n_ex,
const unsigned int* d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_filter_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
// determine parameters for kernel launch
int n_blocks = N / run_block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH()) / double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
hipLaunchKernelGGL((gpu_nlist_filter_kernel),
dim3(n_blocks),
dim3(run_block_size),
0,
0,
d_n_neigh,
d_nlist,
d_head_list,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return hipSuccess;
}
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int* tags,
const unsigned int* rtags,
const unsigned int* n_ex_tag,
const unsigned int* ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int* n_ex_idx,
unsigned int* ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
hipError_t gpu_update_exclusion_list(const unsigned int* d_tag,
const unsigned int* d_rtag,
const unsigned int* d_n_ex_tag,
const unsigned int* d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int* d_n_ex_idx,
unsigned int* d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 256;
hipLaunchKernelGGL((gpu_update_exclusion_list_kernel),
dim3(N / block_size + 1),
dim3(block_size),
0,
0,
d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return hipSuccess;
}
//! GPU kernel to do a preliminary sizing on particles
/*!
* \param d_head_list The head list of indexes to overwrite
* \param d_req_size_nlist Flag for the required size of the neighbor list to overwrite
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
*
* This kernel initializes the head list with the number of neighbors that each type expects from
* d_Nmax. A prefix sum is then performed in gpu_nlist_build_head_list() to accumulate starting
* indices.
*/
__global__ void gpu_nlist_init_head_list_kernel(size_t* d_head_list,
size_t* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes)
{
// particle index
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// one thread per particle
if (idx >= N)
return;
const Scalar4 postype_i = d_pos[idx];
const unsigned int type_i = __scalar_as_int(postype_i.w);
const unsigned int Nmax_i = __ldg(d_Nmax + type_i);
d_head_list[idx] = Nmax_i;
// last thread presets its number of particles in the memory req as well
if (idx == (N - 1))
{
*d_req_size_nlist = Nmax_i;
}
}
/*!
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_head_list The complete particle head list
* \param N the number of particles on this rank
*
* A single thread on the device is needed to complete the exclusive scan and find the size of the
* neighbor list. Because gpu_nlist_init_head_list_kernel() already set the number of neighbors for
* the last particle in d_req_size_nlist, the head index of the last particle is added to this
* number to get the total size.
*/
__global__ void gpu_nlist_get_nlist_size_kernel(size_t* d_req_size_nlist,
const size_t* d_head_list,
const unsigned int N)
{
*d_req_size_nlist += d_head_list[N - 1];
}
/*!
* \param d_head_list The head list of indexes to compute for reading the neighbor list
* \param d_req_size_nlist Flag for the total size of the neighbor list
* \param d_Nmax The number of neighbors to size per particle type
* \param d_pos Particle positions and types
* \param N the number of particles on this rank
* \param ntypes the number of types in the system
* \param block_size Number of threads per block for gpu_nlist_init_head_list_kernel()
*
* \return hipSuccess on completion
*
* \b Implementation
* \a d_head_list is filled with the number of neighbors per particle. An exclusive prefix sum is
* performed in place on \a d_head_list using the thrust libraries and a single thread is used to
* perform compute the total size of the neighbor list while still on device.
*/
hipError_t gpu_nlist_build_head_list(size_t* d_head_list,
size_t* d_req_size_nlist,
const unsigned int* d_Nmax,
const Scalar4* d_pos,
const unsigned int N,
const unsigned int ntypes,
const unsigned int block_size)
{
unsigned int max_block_size;
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_nlist_init_head_list_kernel);
max_block_size = attr.maxThreadsPerBlock;
unsigned int run_block_size = min(block_size, max_block_size);
// initialize each particle with its number of neighbors
hipLaunchKernelGGL((gpu_nlist_init_head_list_kernel),
dim3(N / run_block_size + 1),
dim3(run_block_size),
0,
0,
d_head_list,
d_req_size_nlist,
d_Nmax,
d_pos,
N,
ntypes);
thrust::device_ptr<size_t> t_head_list = thrust::device_pointer_cast(d_head_list);
thrust::exclusive_scan(t_head_list, t_head_list + N, t_head_list);
hipLaunchKernelGGL((gpu_nlist_get_nlist_size_kernel),
dim3(1),
dim3(1),
0,
0,
d_req_size_nlist,
d_head_list,
N);
return hipSuccess;
}
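// Worked example of gpu_nlist_build_head_list() above: for three particles whose
// types allow Nmax = {8, 8, 16} neighbors, the init kernel first writes
// d_head_list = {8, 8, 16} and sets *d_req_size_nlist = 16 (the last particle's
// Nmax); the exclusive scan turns the head list into the offsets {0, 8, 16}, and
// the final single-thread kernel adds d_head_list[N-1] = 16, giving a required
// neighbor list size of 32 entries.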
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
|
8a0b529251b20559210a1095a0ec920fd2c48b1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <math.h>
#include <hip/hip_cooperative_groups.h>
using namespace std;
const static int U_FILE = 1;
const static int V_FILE = 2;
const static int P_FILE = 3;
const int row = 41;
const int col = 41;
// 1024 = > CUDA error: too many blocks in cooperative launch
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#grid-synchronization-cg
int THREAD_NUM = 512;
// const int BLOCK_NUM = row;
// const int THREAD_NUM = col;
const double dx_cpu = 2.0 / (col - 1);
const double dy_cpu = 2.0 / (row - 1);
__device__ int nx = col;
__device__ int ny = row;
__device__ int nt = 500;
__device__ int nit = 50;
__device__ double c = 1.0;
// double dx = 2.0 / (nx - 1);
__device__ double dx = dx_cpu;
// double dy = 2.0 / (ny - 1);
__device__ double dy = dy_cpu;
__device__ double rho = 1.0;
__device__ double nu = 0.1;
__device__ double dt = 0.001;
// __device__ __managed__ int count;
template < typename Type > std::string to_str (const Type & t)
{
std::ostringstream os;
os << t;
return os.str ();
}
//2-d
string array_2d_to_json(double *vector,int row,int col){
string result = "[";
for(int i=0;i<row;i++){
result += "[";
for(int j=0;j<col;j++){
result += to_str(vector[i*col+j]);
if( j!=col-1 ){
result += ",";
}
}
result += "]";
if( i!=row-1 ){
result += ",";
}
}
return result+"]";
}
void print_array(double *vector,int row,int col){
for(int i=0;i<row;i++){
for(int j=0;j<col;j++){
printf("%f,",vector[i*col+j]);
}
printf("\n");
}
}
void write_string_to_file(string str,int flag){
ofstream outfile;
switch(flag){
case U_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/u_json.txt");
break;
case V_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/v_json.txt");
break;
case P_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/p_json.txt");
break;
default:
break;
}
outfile << str << endl;
outfile.close();
}
void zeros(double *vector,int row,int col){
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
}
//2-d's index to 1-d's index
__device__ int index(int j,int i){
if( j*nx+i >= nx * ny ){
printf("over index:%d,%d\n",j,i);
}
//nx is col
return j*nx+i;
}
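//Example of the row-major mapping above: with nx == 41, index(1, 0) == 41 is the
//first cell of the second grid row and index(40, 40) == 1680 is the last cell.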
__device__ void zeros_gpu(double *vector,int row,int col){
// printf("zeros_gpu\n");
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
// printf("end\n");
}
//copies the nx*ny elements of origin into the destination buffer
__device__ void copy(double *copy,double *origin){
// printf("copy\n");
for(int i=0;i<nx * ny;i++){
copy[i] = origin[i];
}
}
__device__ void judge_over_index(int index){
if( index >= nx * ny ){
printf("over index:%d\n",index);
}
}
__device__ void build_up_b_single_thread(cooperative_groups::grid_group grid,double *b,double *u, double *v){
// printf("build_up_b:%d\n",threadId);
int row = ny,col = nx;
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// b[j][i] = (rho * (1 / dt *
// ((u[j][i+1] - u[j][i-1]) /
// (2 * dx) + (v[j+1][i] - v[j-1][i]) / (2 * dy)) -
// pow(((u[j][i+1] - u[j][i-1]) / (2 * dx)),2) -
// 2 * ((u[j+1][i] - u[j-1][i]) / (2 * dy) *
// (v[j][i+1] - v[j][i-1]) / (2 * dx))-
// pow(((v[j+1][i] - v[j-1][i]) / (2 * dy)),2)));
b[index(j,i)] = (rho * (1 / dt *
((u[index(j,i+1)] - u[index(j,i-1)]) /
(2 * dx) + (v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)) -
pow(((u[index(j,i+1)] - u[index(j,i-1)]) / (2 * dx)),2) -
2 * ((u[index(j+1,i)] - u[index(j-1,i)]) / (2 * dy) *
(v[index(j,i+1)] - v[index(j,i-1)]) / (2 * dx))-
pow(((v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)),2)));
}
}
}
//one thread updates the source term b at one interior grid point
__device__ void build_up_b(cooperative_groups::grid_group grid,int threadId,double *b,double *u, double *v){
// if( blockId < 1 && threadId < 1 ){
// printf("build_up_b\n");
// }
// printf("build_up_b:%d\n",threadId);
int row = ny,col = nx;
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
b[threadId] = (rho * (1 / dt *
((u[threadId+1] - u[threadId-1]) /
(2 * dx) + (v[threadId+col] - v[threadId-col]) / (2 * dy)) -
pow(((u[threadId+1] - u[threadId-1]) / (2 * dx)),2) -
2 * ((u[threadId+col] - u[threadId-col]) / (2 * dy) *
(v[threadId+1] - v[threadId-1]) / (2 * dx))-
pow(((v[threadId+col] - v[threadId-col]) / (2 * dy)),2)));
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // b[j][i] = (rho * (1 / dt *
// // ((u[j][i+1] - u[j][i-1]) /
// // (2 * dx) + (v[j+1][i] - v[j-1][i]) / (2 * dy)) -
// // pow(((u[j][i+1] - u[j][i-1]) / (2 * dx)),2) -
// // 2 * ((u[j+1][i] - u[j-1][i]) / (2 * dy) *
// // (v[j][i+1] - v[j][i-1]) / (2 * dx))-
// // pow(((v[j+1][i] - v[j-1][i]) / (2 * dy)),2)));
// b[index(j,i)] = (rho * (1 / dt *
// ((u[index(j,i+1)] - u[index(j,i-1)]) /
// (2 * dx) + (v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)) -
// pow(((u[index(j,i+1)] - u[index(j,i-1)]) / (2 * dx)),2) -
// 2 * ((u[index(j+1,i)] - u[index(j-1,i)]) / (2 * dy) *
// (v[index(j,i+1)] - v[index(j,i-1)]) / (2 * dx))-
// pow(((v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)),2)));
// }
// }
}
__device__ void pressure_poisson_single_thread(cooperative_groups::grid_group grid,double *p,double *b,double *pn){
copy(pn,p);
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
copy(pn,p);
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// p[j][i] = (((pn[j][i+1] + pn[j][i-1]) * pow(dy,2) +
// (pn[j+1][i] + pn[j-1][i]) * pow(dx,2)) /
// (2 * (pow(dx,2) + pow(dy,2))) -
// pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// b[j][i]);
p[index(j,i)] = (((pn[index(j,i+1)] + pn[index(j,i-1)]) * pow(dy,2) +
(pn[index(j+1,i)] + pn[index(j-1,i)]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[index(j,i)]);
}
}
for(int j=0;j<row;j++){
//p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
// p[j][col-1] = p[j][col-2];
p[index(j,col-1)] = p[index(j,col-2)];
}
for(int i=0;i<col;i++){
//p[0, :] = p[1, :] # dp/dy = 0 at y = 0
// p[0][i] = p[1][i];
p[index(0,i)] = p[index(1,i)];
}
for(int j=0;j<row;j++){
//p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
// p[j][0] = p[j][1];
p[index(j,0)] = p[index(j,1)];
}
for(int i=0;i<col;i++){
//p[-1, :] = 0 # p = 0 at y = 2
// p[row-1][i] = 0.0;
p[index(row-1,i)] = 0.0;
}
//lock
}
}
//iterative pressure Poisson solve; the outer q-loop carries a data dependence, so each step ends with grid-wide syncs
__device__ void pressure_poisson(cooperative_groups::grid_group grid,int threadId,double *p,double *b,double *pn){
// if( blockId < 1 && threadId < 1 ){
// printf("pressure_poisson\n");
// }
// printf("pressure_poisson:%d\n",threadId);
// double *pn;
// pn = (double *)malloc(nx * ny * sizeof(double));
// copy(pn,p);
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
// copy(pn,p);
pn[threadId] = p[threadId];
grid.sync();
p[threadId] = (((pn[threadId+1] + pn[threadId-1]) * pow(dy,2) +
(pn[threadId+col] + pn[threadId-col]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[threadId]);
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // p[j][i] = (((pn[j][i+1] + pn[j][i-1]) * pow(dy,2) +
// // (pn[j+1][i] + pn[j-1][i]) * pow(dx,2)) /
// // (2 * (pow(dx,2) + pow(dy,2))) -
// // pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// // b[j][i]);
// p[index(j,i)] = (((pn[index(j,i+1)] + pn[index(j,i-1)]) * pow(dy,2) +
// (pn[index(j+1,i)] + pn[index(j-1,i)]) * pow(dx,2)) /
// (2 * (pow(dx,2) + pow(dy,2))) -
// pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// b[index(j,i)]);
// }
// }
grid.sync();
for(int j=0;j<row;j++){
//p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
// p[j][col-1] = p[j][col-2];
p[index(j,col-1)] = p[index(j,col-2)];
}
grid.sync();
for(int i=0;i<col;i++){
//p[0, :] = p[1, :] # dp/dy = 0 at y = 0
// p[0][i] = p[1][i];
p[index(0,i)] = p[index(1,i)];
}
grid.sync();
for(int j=0;j<row;j++){
//p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
// p[j][0] = p[j][1];
p[index(j,0)] = p[index(j,1)];
}
grid.sync();
for(int i=0;i<col;i++){
//p[-1, :] = 0 # p = 0 at y = 2
// p[row-1][i] = 0.0;
p[index(row-1,i)] = 0.0;
}
grid.sync();
//lock
}
// free(pn);
// pn = NULL;
}
__device__ void cavity_flow_single_thread(cooperative_groups::grid_group grid,int threadId, double *u, double *v, double *p,double *b,double *un,double *vn,double *pn){
int row = ny,col = nx;
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// copy
if( threadId == 60 ){
copy(un,u);
copy(vn,v);
copy(pn,p);
//
if( threadId / col == 0 || threadId / col == row-1 || threadId % col == 0 || threadId % col == col-1 ){
continue;
}
build_up_b_single_thread(grid,b,u,v);
pressure_poisson_single_thread(grid,p, b,pn);
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// u[j][i] = (un[j][i]-
// un[j][i] * dt / dx *
// (un[j][i] - un[j][i-1]) -
// vn[j][i] * dt / dy *
// (un[j][i] - un[j-1][i]) -
// dt / (2 * rho * dx) * (p[j][i+1] - p[j][i-1]) +
// nu * (dt / pow(dx,2) *
// (un[j][i+1] - 2 * un[j][i] + un[j][i-1]) +
// dt / pow(dy,2) *
// (un[j+1][i] - 2 * un[j][i] + un[j-1][i])));
u[index(j,i)] = (un[index(j,i)]-
un[index(j,i)] * dt / dx *
(un[index(j,i)] - un[index(j,i-1)]) -
vn[index(j,i)] * dt / dy *
(un[index(j,i)] - un[index(j-1,i)]) -
dt / (2 * rho * dx) * (p[index(j,i+1)] - p[index(j,i-1)]) +
nu * (dt / pow(dx,2) *
(un[index(j,i+1)] - 2 * un[index(j,i)] + un[index(j,i-1)]) +
dt / pow(dy,2) *
(un[index(j+1,i)] - 2 * un[index(j,i)] + un[index(j-1,i)])));
}
}
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// v[j][i] = (vn[j][i] -
// un[j][i] * dt / dx *
// (vn[j][i] - vn[j][i-1]) -
// vn[j][i] * dt / dy *
// (vn[j][i] - vn[j-1][i]) -
// dt / (2 * rho * dy) * (p[j+1][i] - p[j-1][i]) +
// nu * (dt / pow(dx,2) *
// (vn[j][i+1] - 2 * vn[j][i] + vn[j][i-1]) +
// dt / pow(dy,2) *
// (vn[j+1][i] - 2 * vn[j][i] + vn[j-1][i])));
v[index(j,i)] = (vn[index(j,i)] -
un[index(j,i)] * dt / dx *
(vn[index(j,i)] - vn[index(j,i-1)]) -
vn[index(j,i)] * dt / dy *
(vn[index(j,i)] - vn[index(j-1,i)]) -
dt / (2 * rho * dy) * (p[index(j+1,i)] - p[index(j-1,i)]) +
nu * (dt / pow(dx,2) *
(vn[index(j,i+1)] - 2 * vn[index(j,i)] + vn[index(j,i-1)]) +
dt / pow(dy,2) *
(vn[index(j+1,i)] - 2 * vn[index(j,i)] + vn[index(j-1,i)])));
}
}
for(int i=0;i<col;i++){
// u[0, :] = 0
// u[0][i] = 0;
u[index(0,i)] = 0;
}
for(int j=0;j<row;j++){
// u[:, 0] = 0
// u[j][0] = 0;
u[index(j,0)] = 0;
}
for(int j=0;j<row;j++){
// u[:, -1] = 0
// u[j][col-1] = 0;
u[index(j,col-1)] = 0;
}
for(int i=0;i<col;i++){
// u[-1, :] = 1 # set velocity on cavity lid equal to 1
// u[row-1][i] = 1;
u[index(row-1,i)] = 1;
}
for(int i=0;i<col;i++){
// v[0, :] = 0
// v[0][i] = 0;
v[index(0,i)] = 0;
}
for(int i=0;i<col;i++){
// v[-1, :] = 0
// v[row-1][i] = 0;
v[index(row-1,i)] = 0;
}
for(int j=0;j<row;j++){
// v[:, 0] = 0
// v[j][0] = 0;
v[index(j,0)] = 0;
}
for(int j=0;j<row;j++){
// v[:, -1] = 0
// v[j][col-1] = 0;
v[index(j,col-1)] = 0;
}
}
}
printf("end:%d\n",threadId);
}
__device__ void cavity_flow(cooperative_groups::grid_group grid,int threadId, double *u, double *v, double *p,double *b,double *un,double *vn,double *pn){
// if( threadId > 1599 ){
// printf("cavity_flow:%d,total:%d\n",threadId < nx * ny,nx * ny);
// }
// if( blockId < 1 && threadId < 1 ){
// printf("cavity_flow\n");
// }
// printf("cavity_flow\n");
// double *un,*vn;
// un = (double *)malloc(nx * ny * sizeof(double));
// vn = (double *)malloc(nx * ny * sizeof(double));
int row = ny,col = nx;
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// printf("threadId:%d,n:%d\n",threadId,n);
// atomicAdd(&count, 1);
// printf("%d\n",n);
// if( blockId < 1 && threadId < 1 ){
// printf("cavity_flow\n");
// }
// copy
un[threadId] = u[threadId];
vn[threadId] = v[threadId];
pn[threadId] = p[threadId];
// // printf("first\n");
grid.sync();
// printf("second\n");
//
if( threadId / col == 0 || threadId / col == row-1 || threadId % col == 0 || threadId % col == col-1 ){
// printf("threadId:%d\n",threadId);
continue;
}
// copy(un,u);
// copy(vn,v);
// printf("---\n");
// change b
build_up_b(grid,threadId,b, u, v);
grid.sync();
// change p
pressure_poisson(grid,threadId,p, b,pn);
grid.sync();
//lock
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // u[j][i] = (un[j][i]-
// // un[j][i] * dt / dx *
// // (un[j][i] - un[j][i-1]) -
// // vn[j][i] * dt / dy *
// // (un[j][i] - un[j-1][i]) -
// // dt / (2 * rho * dx) * (p[j][i+1] - p[j][i-1]) +
// // nu * (dt / pow(dx,2) *
// // (un[j][i+1] - 2 * un[j][i] + un[j][i-1]) +
// // dt / pow(dy,2) *
// // (un[j+1][i] - 2 * un[j][i] + un[j-1][i])));
// u[index(j,i)] = (un[index(j,i)]-
// un[index(j,i)] * dt / dx *
// (un[index(j,i)] - un[index(j,i-1)]) -
// vn[index(j,i)] * dt / dy *
// (un[index(j,i)] - un[index(j-1,i)]) -
// dt / (2 * rho * dx) * (p[index(j,i+1)] - p[index(j,i-1)]) +
// nu * (dt / pow(dx,2) *
// (un[index(j,i+1)] - 2 * un[index(j,i)] + un[index(j,i-1)]) +
// dt / pow(dy,2) *
// (un[index(j+1,i)] - 2 * un[index(j,i)] + un[index(j-1,i)])));
// }
// }
u[threadId] = (un[threadId]-
un[threadId] * dt / dx *
(un[threadId] - un[threadId-1]) -
vn[threadId] * dt / dy *
(un[threadId] - un[threadId-col]) -
dt / (2 * rho * dx) * (p[threadId+1] - p[threadId-1]) +
nu * (dt / pow(dx,2) *
(un[threadId+1] - 2 * un[threadId] + un[threadId-1]) +
dt / pow(dy,2) *
(un[threadId+col] - 2 * un[threadId] + un[threadId-col])));
grid.sync();
//j-loop and i-loop have no Data dependence,so it can make Parallelization directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // v[j][i] = (vn[j][i] -
// // un[j][i] * dt / dx *
// // (vn[j][i] - vn[j][i-1]) -
// // vn[j][i] * dt / dy *
// // (vn[j][i] - vn[j-1][i]) -
// // dt / (2 * rho * dy) * (p[j+1][i] - p[j-1][i]) +
// // nu * (dt / pow(dx,2) *
// // (vn[j][i+1] - 2 * vn[j][i] + vn[j][i-1]) +
// // dt / pow(dy,2) *
// // (vn[j+1][i] - 2 * vn[j][i] + vn[j-1][i])));
// v[index(j,i)] = (vn[index(j,i)] -
// un[index(j,i)] * dt / dx *
// (vn[index(j,i)] - vn[index(j,i-1)]) -
// vn[index(j,i)] * dt / dy *
// (vn[index(j,i)] - vn[index(j-1,i)]) -
// dt / (2 * rho * dy) * (p[index(j+1,i)] - p[index(j-1,i)]) +
// nu * (dt / pow(dx,2) *
// (vn[index(j,i+1)] - 2 * vn[index(j,i)] + vn[index(j,i-1)]) +
// dt / pow(dy,2) *
// (vn[index(j+1,i)] - 2 * vn[index(j,i)] + vn[index(j-1,i)])));
// }
// }
v[threadId] = (vn[threadId] -
un[threadId] * dt / dx *
(vn[threadId] - vn[threadId-1]) -
vn[threadId] * dt / dy *
(vn[threadId] - vn[threadId-col]) -
dt / (2 * rho * dy) * (p[threadId+col] - p[threadId-col]) +
nu * (dt / pow(dx,2) *
(vn[threadId+1] - 2 * vn[threadId] + vn[threadId-1]) +
dt / pow(dy,2) *
(vn[threadId+col] - 2 * vn[threadId] + vn[threadId-col])));
grid.sync();
for(int i=0;i<col;i++){
// u[0, :] = 0
// u[0][i] = 0;
u[index(0,i)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// u[:, 0] = 0
// u[j][0] = 0;
u[index(j,0)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// u[:, -1] = 0
// u[j][col-1] = 0;
u[index(j,col-1)] = 0;
}
grid.sync();
for(int i=0;i<col;i++){
// u[-1, :] = 1 # set velocity on cavity lid equal to 1
// u[row-1][i] = 1;
u[index(row-1,i)] = 1;
}
grid.sync();
for(int i=0;i<col;i++){
// v[0, :] = 0
// v[0][i] = 0;
v[index(0,i)] = 0;
}
grid.sync();
for(int i=0;i<col;i++){
// v[-1, :] = 0
// v[row-1][i] = 0;
v[index(row-1,i)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// v[:, 0] = 0
// v[j][0] = 0;
v[index(j,0)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// v[:, -1] = 0
// v[j][col-1] = 0;
v[index(j,col-1)] = 0;
}
grid.sync();
//lock
}
// free(un);
// free(vn);
// un = NULL;
// vn = NULL;
printf("end:%d\n",threadId);
}
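//Note on the cooperative launch in main(): grid.sync() inside cavity_flow() is
//only reached by threads with threadId < nx*ny (1681 here), while the launch
//rounds the grid up to BLOCK_NUM * THREAD_NUM = 2048 threads. A grid-wide sync
//formally requires every launched thread to take part, so the surplus threads of
//the last block are a possible source of hangs or undefined behaviour.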
__global__ void kernel(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if( threadId < nx * ny ){
// printf("threadId:%d\n",threadId);
cooperative_groups::grid_group grid = cooperative_groups::this_grid();
// cavity_flow(grid,threadId,u,v,p,b,un,vn,pn);
cavity_flow(grid,threadId,u,v,p,b,un,vn,pn);
}
}
//nvcc cuda_cavity.cu -arch=sm_60 -rdc=true
int main(){
//2-d
// double *u,*v,*p,*b,*un,*vn,*pn,*bn;
double *u,*v,*p,*b,*un,*vn,*pn;
hipMallocManaged(&u, row*col*sizeof(double));
hipMallocManaged(&v, row*col*sizeof(double));
hipMallocManaged(&p, row*col*sizeof(double));
hipMallocManaged(&b, row*col*sizeof(double));
hipMallocManaged(&un, row*col*sizeof(double));
hipMallocManaged(&vn, row*col*sizeof(double));
hipMallocManaged(&pn, row*col*sizeof(double));
zeros(u,row,col);
zeros(v,row,col);
zeros(p,row,col);
zeros(b,row,col);
    // pass the separately allocated un/vn/pn buffers that the kernel expects as its last three arguments
    void *args[] = {(void *)&u, (void *)&v, (void *)&p, (void *)&b, (void *)&un, (void *)&vn, (void *)&pn};
int total = row * col;
int BLOCK_NUM = total / THREAD_NUM ;
if( total % THREAD_NUM > 0 ){
BLOCK_NUM++;
}
printf("total:%d\n",total);
printf("BLOCK_NUM:%d\n",BLOCK_NUM);
printf("THREAD_NUM:%d\n",THREAD_NUM);
hipLaunchCooperativeKernel((void*)kernel, BLOCK_NUM, THREAD_NUM, args);
hipError_t error = hipGetLastError();
printf("CUDA error: %s\n", hipGetErrorString(error));
hipDeviceSynchronize();
hipError_t error2 = hipGetLastError();
printf("CUDA error: %s\n", hipGetErrorString(error2));
// printf("count:%d\n",count);
string u_json = array_2d_to_json(u,row,col),
v_json = array_2d_to_json(v,row,col),
p_json = array_2d_to_json(p,row,col);
write_string_to_file(u_json,U_FILE);
write_string_to_file(v_json,V_FILE);
write_string_to_file(p_json,P_FILE);
hipFree(u);
hipFree(v);
hipFree(p);
hipFree(b);
hipFree(un);
hipFree(vn);
hipFree(pn);
} | 8a0b529251b20559210a1095a0ec920fd2c48b1d.cu | #include <cstdio>
#include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <sstream>
#include <fstream>
#include <math.h>
#include <cooperative_groups.h>
using namespace std;
const static int U_FILE = 1;
const static int V_FILE = 2;
const static int P_FILE = 3;
const int row = 41;
const int col = 41;
// 1024 => CUDA error: too many blocks in cooperative launch (see the occupancy helper sketch above main())
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#grid-synchronization-cg
int THREAD_NUM = 512;
// const int BLOCK_NUM = row;
// const int THREAD_NUM = col;
const double dx_cpu = 2.0 / (col - 1);
const double dy_cpu = 2.0 / (row - 1);
__device__ int nx = col;
__device__ int ny = row;
__device__ int nt = 500;
__device__ int nit = 50;
__device__ double c = 1.0;
// double dx = 2.0 / (nx - 1);
__device__ double dx = dx_cpu;
// double dy = 2.0 / (ny - 1);
__device__ double dy = dy_cpu;
__device__ double rho = 1.0;
__device__ double nu = 0.1;
__device__ double dt = 0.001;
// __device__ __managed__ int count;
template < typename Type > std::string to_str (const Type & t)
{
std::ostringstream os;
os << t;
return os.str ();
}
//2-d
string array_2d_to_json(double *vector,int row,int col){
string result = "[";
for(int i=0;i<row;i++){
result += "[";
for(int j=0;j<col;j++){
result += to_str(vector[i*col+j]);
if( j!=col-1 ){
result += ",";
}
}
result += "]";
if( i!=row-1 ){
result += ",";
}
}
return result+"]";
}
void print_array(double *vector,int row,int col){
for(int i=0;i<row;i++){
for(int j=0;j<col;j++){
printf("%f,",vector[i*col+j]);
}
printf("\n");
}
}
void write_string_to_file(string str,int flag){
ofstream outfile;
switch(flag){
case U_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/u_json.txt");
break;
case V_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/v_json.txt");
break;
case P_FILE:
outfile.open("/home/9/19M38171/t3workspace/hpc-lecture/final_report/p_json.txt");
break;
default:
break;
}
outfile << str << endl;
outfile.close();
}
void zeros(double *vector,int row,int col){
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
}
//2-d's index to 1-d's index
__device__ int index(int j,int i){
if( j*nx+i >= nx * ny ){
printf("over index:%d,%d\n",j,i);
}
//nx is col
return j*nx+i;
}
__device__ void zeros_gpu(double *vector,int row,int col){
// printf("zeros_gpu\n");
for(int i=0;i<row*col;i++){
vector[i] = 0.0;
}
// printf("end\n");
}
//Assume a single thread for now; later this could become a per-element copy, which would make this call unnecessary. Could this cause a deadlock?
__device__ void copy(double *copy,double *origin){
// printf("copy\n");
for(int i=0;i<nx * ny;i++){
copy[i] = origin[i];
}
}
__device__ void judge_over_index(int index){
if( index >= nx * ny ){
printf("over index:%d,%d\n",index);
}
}
__device__ void build_up_b_single_thread(cooperative_groups::grid_group grid,double *b,double *u, double *v){
// printf("build_up_b:%d\n",threadId);
int row = ny,col = nx;
    //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// b[j][i] = (rho * (1 / dt *
// ((u[j][i+1] - u[j][i-1]) /
// (2 * dx) + (v[j+1][i] - v[j-1][i]) / (2 * dy)) -
// pow(((u[j][i+1] - u[j][i-1]) / (2 * dx)),2) -
// 2 * ((u[j+1][i] - u[j-1][i]) / (2 * dy) *
// (v[j][i+1] - v[j][i-1]) / (2 * dx))-
// pow(((v[j+1][i] - v[j-1][i]) / (2 * dy)),2)));
b[index(j,i)] = (rho * (1 / dt *
((u[index(j,i+1)] - u[index(j,i-1)]) /
(2 * dx) + (v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)) -
pow(((u[index(j,i+1)] - u[index(j,i-1)]) / (2 * dx)),2) -
2 * ((u[index(j+1,i)] - u[index(j-1,i)]) / (2 * dy) *
(v[index(j,i+1)] - v[index(j,i-1)]) / (2 * dx))-
pow(((v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)),2)));
}
}
}
//Assume a single thread for now; out-of-range boundary indices are not handled here
__device__ void build_up_b(cooperative_groups::grid_group grid,int threadId,double *b,double *u, double *v){
// if( blockId < 1 && threadId < 1 ){
// printf("build_up_b\n");
// }
// printf("build_up_b:%d\n",threadId);
int row = ny,col = nx;
    //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
b[threadId] = (rho * (1 / dt *
((u[threadId+1] - u[threadId-1]) /
(2 * dx) + (v[threadId+col] - v[threadId-col]) / (2 * dy)) -
pow(((u[threadId+1] - u[threadId-1]) / (2 * dx)),2) -
2 * ((u[threadId+col] - u[threadId-col]) / (2 * dy) *
(v[threadId+1] - v[threadId-1]) / (2 * dx))-
pow(((v[threadId+col] - v[threadId-col]) / (2 * dy)),2)));
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // b[j][i] = (rho * (1 / dt *
// // ((u[j][i+1] - u[j][i-1]) /
// // (2 * dx) + (v[j+1][i] - v[j-1][i]) / (2 * dy)) -
// // pow(((u[j][i+1] - u[j][i-1]) / (2 * dx)),2) -
// // 2 * ((u[j+1][i] - u[j-1][i]) / (2 * dy) *
// // (v[j][i+1] - v[j][i-1]) / (2 * dx))-
// // pow(((v[j+1][i] - v[j-1][i]) / (2 * dy)),2)));
// b[index(j,i)] = (rho * (1 / dt *
// ((u[index(j,i+1)] - u[index(j,i-1)]) /
// (2 * dx) + (v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)) -
// pow(((u[index(j,i+1)] - u[index(j,i-1)]) / (2 * dx)),2) -
// 2 * ((u[index(j+1,i)] - u[index(j-1,i)]) / (2 * dy) *
// (v[index(j,i+1)] - v[index(j,i-1)]) / (2 * dx))-
// pow(((v[index(j+1,i)] - v[index(j-1,i)]) / (2 * dy)),2)));
// }
// }
}
__device__ void pressure_poisson_single_thread(cooperative_groups::grid_group grid,double *p,double *b,double *pn){
copy(pn,p);
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
copy(pn,p);
        //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// p[j][i] = (((pn[j][i+1] + pn[j][i-1]) * pow(dy,2) +
// (pn[j+1][i] + pn[j-1][i]) * pow(dx,2)) /
// (2 * (pow(dx,2) + pow(dy,2))) -
// pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// b[j][i]);
p[index(j,i)] = (((pn[index(j,i+1)] + pn[index(j,i-1)]) * pow(dy,2) +
(pn[index(j+1,i)] + pn[index(j-1,i)]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[index(j,i)]);
}
}
for(int j=0;j<row;j++){
//p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
// p[j][col-1] = p[j][col-2];
p[index(j,col-1)] = p[index(j,col-2)];
}
for(int i=0;i<col;i++){
//p[0, :] = p[1, :] # dp/dy = 0 at y = 0
// p[0][i] = p[1][i];
p[index(0,i)] = p[index(1,i)];
}
for(int j=0;j<row;j++){
//p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
// p[j][0] = p[j][1];
p[index(j,0)] = p[index(j,1)];
}
for(int i=0;i<col;i++){
//p[-1, :] = 0 # p = 0 at y = 2
// p[row-1][i] = 0.0;
p[index(row-1,i)] = 0.0;
}
//lock
}
}
//Assume a single thread for now
__device__ void pressure_poisson(cooperative_groups::grid_group grid,int threadId,double *p,double *b,double *pn){
// if( blockId < 1 && threadId < 1 ){
// printf("pressure_poisson\n");
// }
// printf("pressure_poisson:%d\n",threadId);
// double *pn;
// pn = (double *)malloc(nx * ny * sizeof(double));
// copy(pn,p);
int row = ny,col = nx;
//q-loop have Data dependence
for(int q=0;q<nit;q++){
// copy(pn,p);
pn[threadId] = p[threadId];
grid.sync();
p[threadId] = (((pn[threadId+1] + pn[threadId-1]) * pow(dy,2) +
(pn[threadId+col] + pn[threadId-col]) * pow(dx,2)) /
(2 * (pow(dx,2) + pow(dy,2))) -
pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
b[threadId]);
        //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // p[j][i] = (((pn[j][i+1] + pn[j][i-1]) * pow(dy,2) +
// // (pn[j+1][i] + pn[j-1][i]) * pow(dx,2)) /
// // (2 * (pow(dx,2) + pow(dy,2))) -
// // pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// // b[j][i]);
// p[index(j,i)] = (((pn[index(j,i+1)] + pn[index(j,i-1)]) * pow(dy,2) +
// (pn[index(j+1,i)] + pn[index(j-1,i)]) * pow(dx,2)) /
// (2 * (pow(dx,2) + pow(dy,2))) -
// pow(dx,2) * pow(dy,2) / (2 * (pow(dx,2) + pow(dy,2))) *
// b[index(j,i)]);
// }
// }
grid.sync();
for(int j=0;j<row;j++){
//p[:, -1] = p[:, -2] # dp/dx = 0 at x = 2
// p[j][col-1] = p[j][col-2];
p[index(j,col-1)] = p[index(j,col-2)];
}
grid.sync();
for(int i=0;i<col;i++){
//p[0, :] = p[1, :] # dp/dy = 0 at y = 0
// p[0][i] = p[1][i];
p[index(0,i)] = p[index(1,i)];
}
grid.sync();
for(int j=0;j<row;j++){
//p[:, 0] = p[:, 1] # dp/dx = 0 at x = 0
// p[j][0] = p[j][1];
p[index(j,0)] = p[index(j,1)];
}
grid.sync();
for(int i=0;i<col;i++){
//p[-1, :] = 0 # p = 0 at y = 2
// p[row-1][i] = 0.0;
p[index(row-1,i)] = 0.0;
}
grid.sync();
//lock
}
// free(pn);
// pn = NULL;
}
__device__ void cavity_flow_single_thread(cooperative_groups::grid_group grid,int threadId, double *u, double *v, double *p,double *b,double *un,double *vn,double *pn){
int row = ny,col = nx;
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// copy
if( threadId == 60 ){
copy(un,u);
copy(vn,v);
copy(pn,p);
            //boundary check
if( threadId / col == 0 || threadId / col == row-1 || threadId % col == 0 || threadId % col == col-1 ){
continue;
}
build_up_b_single_thread(grid,b,u,v);
pressure_poisson_single_thread(grid,p, b,pn);
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// u[j][i] = (un[j][i]-
// un[j][i] * dt / dx *
// (un[j][i] - un[j][i-1]) -
// vn[j][i] * dt / dy *
// (un[j][i] - un[j-1][i]) -
// dt / (2 * rho * dx) * (p[j][i+1] - p[j][i-1]) +
// nu * (dt / pow(dx,2) *
// (un[j][i+1] - 2 * un[j][i] + un[j][i-1]) +
// dt / pow(dy,2) *
// (un[j+1][i] - 2 * un[j][i] + un[j-1][i])));
u[index(j,i)] = (un[index(j,i)]-
un[index(j,i)] * dt / dx *
(un[index(j,i)] - un[index(j,i-1)]) -
vn[index(j,i)] * dt / dy *
(un[index(j,i)] - un[index(j-1,i)]) -
dt / (2 * rho * dx) * (p[index(j,i+1)] - p[index(j,i-1)]) +
nu * (dt / pow(dx,2) *
(un[index(j,i+1)] - 2 * un[index(j,i)] + un[index(j,i-1)]) +
dt / pow(dy,2) *
(un[index(j+1,i)] - 2 * un[index(j,i)] + un[index(j-1,i)])));
}
}
for(int j=1;j<row-1;j++){
for(int i=1;i<col-1;i++){
// v[j][i] = (vn[j][i] -
// un[j][i] * dt / dx *
// (vn[j][i] - vn[j][i-1]) -
// vn[j][i] * dt / dy *
// (vn[j][i] - vn[j-1][i]) -
// dt / (2 * rho * dy) * (p[j+1][i] - p[j-1][i]) +
// nu * (dt / pow(dx,2) *
// (vn[j][i+1] - 2 * vn[j][i] + vn[j][i-1]) +
// dt / pow(dy,2) *
// (vn[j+1][i] - 2 * vn[j][i] + vn[j-1][i])));
v[index(j,i)] = (vn[index(j,i)] -
un[index(j,i)] * dt / dx *
(vn[index(j,i)] - vn[index(j,i-1)]) -
vn[index(j,i)] * dt / dy *
(vn[index(j,i)] - vn[index(j-1,i)]) -
dt / (2 * rho * dy) * (p[index(j+1,i)] - p[index(j-1,i)]) +
nu * (dt / pow(dx,2) *
(vn[index(j,i+1)] - 2 * vn[index(j,i)] + vn[index(j,i-1)]) +
dt / pow(dy,2) *
(vn[index(j+1,i)] - 2 * vn[index(j,i)] + vn[index(j-1,i)])));
}
}
for(int i=0;i<col;i++){
// u[0, :] = 0
// u[0][i] = 0;
u[index(0,i)] = 0;
}
for(int j=0;j<row;j++){
// u[:, 0] = 0
// u[j][0] = 0;
u[index(j,0)] = 0;
}
for(int j=0;j<row;j++){
// u[:, -1] = 0
// u[j][col-1] = 0;
u[index(j,col-1)] = 0;
}
for(int i=0;i<col;i++){
// u[-1, :] = 1 # set velocity on cavity lid equal to 1
// u[row-1][i] = 1;
u[index(row-1,i)] = 1;
}
for(int i=0;i<col;i++){
// v[0, :] = 0
// v[0][i] = 0;
v[index(0,i)] = 0;
}
for(int i=0;i<col;i++){
// v[-1, :] = 0
// v[row-1][i] = 0;
v[index(row-1,i)] = 0;
}
for(int j=0;j<row;j++){
// v[:, 0] = 0
// v[j][0] = 0;
v[index(j,0)] = 0;
}
for(int j=0;j<row;j++){
// v[:, -1] = 0
// v[j][col-1] = 0;
v[index(j,col-1)] = 0;
}
}
}
printf("end:%d\n",threadId);
}
__device__ void cavity_flow(cooperative_groups::grid_group grid,int threadId, double *u, double *v, double *p,double *b,double *un,double *vn,double *pn){
// if( threadId > 1599 ){
// printf("cavity_flow:%d,total:%d\n",threadId < nx * ny,nx * ny);
// }
// if( blockId < 1 && threadId < 1 ){
// printf("cavity_flow\n");
// }
// printf("cavity_flow\n");
// double *un,*vn;
// un = (double *)malloc(nx * ny * sizeof(double));
// vn = (double *)malloc(nx * ny * sizeof(double));
int row = ny,col = nx;
// zeros_gpu(b,ny,nx);
for(int n=0;n<nt;n++){
// printf("threadId:%d,n:%d\n",threadId,n);
// atomicAdd(&count, 1);
// printf("%d\n",n);
// if( blockId < 1 && threadId < 1 ){
// printf("cavity_flow\n");
// }
// copy
un[threadId] = u[threadId];
vn[threadId] = v[threadId];
pn[threadId] = p[threadId];
// // printf("first\n");
grid.sync();
// printf("second\n");
        //boundary check
if( threadId / col == 0 || threadId / col == row-1 || threadId % col == 0 || threadId % col == col-1 ){
// printf("threadId:%d\n",threadId);
continue;
}
// copy(un,u);
// copy(vn,v);
// printf("---\n");
// change b
build_up_b(grid,threadId,b, u, v);
grid.sync();
// change p
pressure_poisson(grid,threadId,p, b,pn);
grid.sync();
//lock
        //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // u[j][i] = (un[j][i]-
// // un[j][i] * dt / dx *
// // (un[j][i] - un[j][i-1]) -
// // vn[j][i] * dt / dy *
// // (un[j][i] - un[j-1][i]) -
// // dt / (2 * rho * dx) * (p[j][i+1] - p[j][i-1]) +
// // nu * (dt / pow(dx,2) *
// // (un[j][i+1] - 2 * un[j][i] + un[j][i-1]) +
// // dt / pow(dy,2) *
// // (un[j+1][i] - 2 * un[j][i] + un[j-1][i])));
// u[index(j,i)] = (un[index(j,i)]-
// un[index(j,i)] * dt / dx *
// (un[index(j,i)] - un[index(j,i-1)]) -
// vn[index(j,i)] * dt / dy *
// (un[index(j,i)] - un[index(j-1,i)]) -
// dt / (2 * rho * dx) * (p[index(j,i+1)] - p[index(j,i-1)]) +
// nu * (dt / pow(dx,2) *
// (un[index(j,i+1)] - 2 * un[index(j,i)] + un[index(j,i-1)]) +
// dt / pow(dy,2) *
// (un[index(j+1,i)] - 2 * un[index(j,i)] + un[index(j-1,i)])));
// }
// }
u[threadId] = (un[threadId]-
un[threadId] * dt / dx *
(un[threadId] - un[threadId-1]) -
vn[threadId] * dt / dy *
(un[threadId] - un[threadId-col]) -
dt / (2 * rho * dx) * (p[threadId+1] - p[threadId-1]) +
nu * (dt / pow(dx,2) *
(un[threadId+1] - 2 * un[threadId] + un[threadId-1]) +
dt / pow(dy,2) *
(un[threadId+col] - 2 * un[threadId] + un[threadId-col])));
grid.sync();
        //The j-loop and the i-loop have no data dependence, so they can be parallelized directly
// for(int j=1;j<row-1;j++){
// for(int i=1;i<col-1;i++){
// // v[j][i] = (vn[j][i] -
// // un[j][i] * dt / dx *
// // (vn[j][i] - vn[j][i-1]) -
// // vn[j][i] * dt / dy *
// // (vn[j][i] - vn[j-1][i]) -
// // dt / (2 * rho * dy) * (p[j+1][i] - p[j-1][i]) +
// // nu * (dt / pow(dx,2) *
// // (vn[j][i+1] - 2 * vn[j][i] + vn[j][i-1]) +
// // dt / pow(dy,2) *
// // (vn[j+1][i] - 2 * vn[j][i] + vn[j-1][i])));
// v[index(j,i)] = (vn[index(j,i)] -
// un[index(j,i)] * dt / dx *
// (vn[index(j,i)] - vn[index(j,i-1)]) -
// vn[index(j,i)] * dt / dy *
// (vn[index(j,i)] - vn[index(j-1,i)]) -
// dt / (2 * rho * dy) * (p[index(j+1,i)] - p[index(j-1,i)]) +
// nu * (dt / pow(dx,2) *
// (vn[index(j,i+1)] - 2 * vn[index(j,i)] + vn[index(j,i-1)]) +
// dt / pow(dy,2) *
// (vn[index(j+1,i)] - 2 * vn[index(j,i)] + vn[index(j-1,i)])));
// }
// }
v[threadId] = (vn[threadId] -
un[threadId] * dt / dx *
(vn[threadId] - vn[threadId-1]) -
vn[threadId] * dt / dy *
(vn[threadId] - vn[threadId-col]) -
dt / (2 * rho * dy) * (p[threadId+col] - p[threadId-col]) +
nu * (dt / pow(dx,2) *
(vn[threadId+1] - 2 * vn[threadId] + vn[threadId-1]) +
dt / pow(dy,2) *
(vn[threadId+col] - 2 * vn[threadId] + vn[threadId-col])));
grid.sync();
for(int i=0;i<col;i++){
// u[0, :] = 0
// u[0][i] = 0;
u[index(0,i)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// u[:, 0] = 0
// u[j][0] = 0;
u[index(j,0)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// u[:, -1] = 0
// u[j][col-1] = 0;
u[index(j,col-1)] = 0;
}
grid.sync();
for(int i=0;i<col;i++){
// u[-1, :] = 1 # set velocity on cavity lid equal to 1
// u[row-1][i] = 1;
u[index(row-1,i)] = 1;
}
grid.sync();
for(int i=0;i<col;i++){
// v[0, :] = 0
// v[0][i] = 0;
v[index(0,i)] = 0;
}
grid.sync();
for(int i=0;i<col;i++){
// v[-1, :] = 0
// v[row-1][i] = 0;
v[index(row-1,i)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// v[:, 0] = 0
// v[j][0] = 0;
v[index(j,0)] = 0;
}
grid.sync();
for(int j=0;j<row;j++){
// v[:, -1] = 0
// v[j][col-1] = 0;
v[index(j,col-1)] = 0;
}
grid.sync();
//lock
}
// free(un);
// free(vn);
// un = NULL;
// vn = NULL;
printf("end:%d\n",threadId);
}
__global__ void kernel(double *u,double *v,double *p,double *b,double *un,double *vn,double *pn){
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if( threadId < nx * ny ){
// printf("threadId:%d\n",threadId);
cooperative_groups::grid_group grid = cooperative_groups::this_grid();
// cavity_flow(grid,threadId,u,v,p,b,un,vn,pn);
cavity_flow(grid,threadId,u,v,p,b,un,vn,pn);
}
}
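// Hedged sketch (not part of the original source): the note next to THREAD_NUM reports
// "too many blocks in cooperative launch". cudaLaunchCooperativeKernel only allows as many
// blocks as can be resident simultaneously, so one way to bound BLOCK_NUM is to query the
// occupancy API for this kernel; the helper name max_cooperative_blocks is hypothetical.
static int max_cooperative_blocks(int threadsPerBlock){
    int device = 0, numSms = 0, blocksPerSm = 0;
    cudaGetDevice(&device);
    // Number of SMs on the current device.
    cudaDeviceGetAttribute(&numSms, cudaDevAttrMultiProcessorCount, device);
    // Resident blocks per SM for `kernel` at this block size (no dynamic shared memory).
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, kernel, threadsPerBlock, 0);
    return blocksPerSm * numSms;
}
// Possible use in main(): clamp BLOCK_NUM to max_cooperative_blocks(THREAD_NUM) before launching.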
//nvcc cuda_cavity.cu -arch=sm_60 -rdc=true
int main(){
//2-d
// double *u,*v,*p,*b,*un,*vn,*pn,*bn;
double *u,*v,*p,*b,*un,*vn,*pn;
cudaMallocManaged(&u, row*col*sizeof(double));
cudaMallocManaged(&v, row*col*sizeof(double));
cudaMallocManaged(&p, row*col*sizeof(double));
cudaMallocManaged(&b, row*col*sizeof(double));
cudaMallocManaged(&un, row*col*sizeof(double));
cudaMallocManaged(&vn, row*col*sizeof(double));
cudaMallocManaged(&pn, row*col*sizeof(double));
zeros(u,row,col);
zeros(v,row,col);
zeros(p,row,col);
zeros(b,row,col);
    // pass the separately allocated un/vn/pn buffers that the kernel expects as its last three arguments
    void *args[] = {(void *)&u, (void *)&v, (void *)&p, (void *)&b, (void *)&un, (void *)&vn, (void *)&pn};
int total = row * col;
int BLOCK_NUM = total / THREAD_NUM ;
if( total % THREAD_NUM > 0 ){
BLOCK_NUM++;
}
printf("total:%d\n",total);
printf("BLOCK_NUM:%d\n",BLOCK_NUM);
printf("THREAD_NUM:%d\n",THREAD_NUM);
cudaLaunchCooperativeKernel((void*)kernel, BLOCK_NUM, THREAD_NUM, args);
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s\n", cudaGetErrorString(error));
cudaDeviceSynchronize();
cudaError_t error2 = cudaGetLastError();
printf("CUDA error: %s\n", cudaGetErrorString(error2));
// printf("count:%d\n",count);
string u_json = array_2d_to_json(u,row,col),
v_json = array_2d_to_json(v,row,col),
p_json = array_2d_to_json(p,row,col);
write_string_to_file(u_json,U_FILE);
write_string_to_file(v_json,V_FILE);
write_string_to_file(p_json,P_FILE);
cudaFree(u);
cudaFree(v);
cudaFree(p);
cudaFree(b);
cudaFree(un);
cudaFree(vn);
cudaFree(pn);
} |
3e62b3afec90d479dc92e4a40207cc482893e37d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FCudaDeviceWrapper.hpp"
#include "FCudaTreeCoordinate.hpp"
#include "FCudaStructParams.hpp"
#define FMGetOppositeNeighIndex(index) (27-(index)-1)
#define FMGetOppositeInterIndex(index) (343-(index)-1)
#define FCudaMax(x,y) ((x)<(y) ? (y) : (x))
#define FCudaMin(x,y) ((x)>(y) ? (y) : (x))
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__bottomPassPerform(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
CudaKernelClass* kernel){
CellContainerClass leafCells(leafCellsPtr, leafCellsSize, leafCellsUpPtr, nullptr);
ParticleContainerGroupClass containers(containersPtr, containersSize, nullptr);
for(int leafIdx = blockIdx.x ; leafIdx < leafCells.getNumberOfCellsInBlock() ; leafIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = leafCells.getUpCell(leafIdx);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(leafIdx);
FCudaAssertLF(leafCells.getCellMortonIndex(leafIdx) == containers.getLeafMortonIndex(leafIdx));
kernel->P2M(cell, &particles);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__bottomPassCallback(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__bottomPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream,
leafCellsPtr, leafCellsSize,leafCellsUpPtr,
containersPtr, containersSize,
kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Upward Pass
/////////////////////////////////////////////////////////////////////////////////////
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__upwardPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize,currentCellsUpPtr,nullptr);
CellContainerClass subCellGroup(childCellsPtr, childCellsSize,childCellsUpPtr,nullptr);
const MortonIndex firstParent = FCudaMax(currentCells.getStartingIndex(), subCellGroup.getStartingIndex()>>3);
const MortonIndex lastParent = FCudaMin(currentCells.getEndingIndex()-1, (subCellGroup.getEndingIndex()-1)>>3);
int idxParentCell = currentCells.getCellIndex(firstParent);
int idxChildCell = subCellGroup.getFistChildIdx(firstParent);
while(true){
typename CellContainerClass::CompleteCellClass cell = currentCells.getUpCell(idxParentCell);
typename CellContainerClass::CompleteCellClass child[8];
for(int idxChild = 0 ; idxChild < 8 ; ++idxChild){
child[idxChild].symb = nullptr;
}
do{
const int idxChild = ((subCellGroup.getCellMortonIndex(idxChildCell)) & 7);
child[idxChild] = subCellGroup.getUpCell(idxChildCell);
idxChildCell += 1;
}while(idxChildCell != subCellGroup.getNumberOfCellsInBlock() && cell.symb->mortonIndex == (subCellGroup.getCellMortonIndex(idxChildCell)>>3));
kernel->M2M(cell, child, idxLevel);
if(currentCells.getCellMortonIndex(idxParentCell) == lastParent){
break;
}
idxParentCell += 1;
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__upwardPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__upwardPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream,
currentCellsPtr, currentCellsSize,currentCellsUpPtr,
childCellsPtr, childCellsSize,childCellsUpPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Transfer Pass Mpi
/////////////////////////////////////////////////////////////////////////////////////
#ifdef SCALFMM_USE_MPI
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInoutPassPerformMpi(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
const int* safeInteractions, int nbSafeInteractions, int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, nullptr, currentCellsDownPtr);
CellContainerClass cellsOther(externalCellsPtr, externalCellsSize, externalCellsUpPtr, nullptr);
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
const int cellPos = cellsOther.getCellIndex(outsideInteractions[outInterIdx].outIndex);
if(cellPos != -1){
typename CellContainerClass::CompleteCellClass interCell = cellsOther.getUpCell(cellPos);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
kernel->M2L( cell , &interCell, &outsideInteractions[outInterIdx].relativeOutPosition, 1, idxLevel);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInoutPassCallbackMpi(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( hipMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( hipMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
hipMemcpyHostToDevice ) );
int* cuSafeInteractions;
FCudaCheck( hipMalloc(&cuSafeInteractions,(nbSafeInteractions+1)*sizeof(int)) );
FCudaCheck( hipMemcpy( cuSafeInteractions, safeInteractions, (nbSafeInteractions+1)*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( FCuda__transferInoutPassPerformMpi
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, currentCellsPtr, currentCellsSize, currentCellsDownPtr,
externalCellsPtr, externalCellsSize, externalCellsUpPtr,
cuSafeInteractions, nbSafeInteractions, idxLevel, cuOutsideInteractions, nbOutsideInteractions, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
FCudaCheck(hipFree(cuSafeInteractions));
FCudaCheck(hipFree(cuOutsideInteractions));
}
#endif
/////////////////////////////////////////////////////////////////////////////////////
/// Transfer Pass
/////////////////////////////////////////////////////////////////////////////////////
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, currentCellsUpPtr, currentCellsDownPtr);
const MortonIndex blockStartIdx = currentCells.getStartingIndex();
const MortonIndex blockEndIdx = currentCells.getEndingIndex();
for(int cellIdx = blockIdx.x ; cellIdx < currentCells.getNumberOfCellsInBlock() ; cellIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(cellIdx);
MortonIndex interactionsIndexes[189];
int interactionsPosition[189];
const int3 coord = (FCudaTreeCoordinate::ConvertCoordinate(cell.symb->coordinates));
int counter = FCudaTreeCoordinate::GetInteractionNeighbors(coord, idxLevel,interactionsIndexes,interactionsPosition);
typename CellContainerClass::CompleteCellClass interactions[189];
int counterExistingCell = 0;
for(int idxInter = 0 ; idxInter < counter ; ++idxInter){
if( blockStartIdx <= interactionsIndexes[idxInter] && interactionsIndexes[idxInter] < blockEndIdx ){
const int cellPos = currentCells.getCellIndex(interactionsIndexes[idxInter]);
if(cellPos != -1){
typename CellContainerClass::CompleteCellClass interCell = currentCells.getUpCell(cellPos);
interactions[counterExistingCell] = interCell;
interactionsPosition[counterExistingCell] = interactionsPosition[idxInter];
counterExistingCell += 1;
}
}
}
kernel->M2L( cell , interactions, interactionsPosition, counterExistingCell, idxLevel);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__transferInPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, currentCellsPtr, currentCellsSize,
currentCellsUpPtr, currentCellsDownPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInoutPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, nullptr, currentCellsDownPtr);
CellContainerClass cellsOther(externalCellsPtr, externalCellsSize, externalCellsUpPtr, nullptr);
if(mode == 1){
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
typename CellContainerClass::CompleteCellClass interCell = cellsOther.getUpCell(outsideInteractions[outInterIdx].outsideIdxInBlock);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
kernel->M2L( cell , &interCell, &outsideInteractions[outInterIdx].relativeOutPosition, 1, idxLevel);
}
}
}
else{
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
typename CellContainerClass::CompleteCellClass cell = cellsOther.getUpCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
typename CellContainerClass::CompleteCellClass interCell = currentCells.getDownCell(outsideInteractions[outInterIdx].outsideIdxInBlock);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
const int otherPosition = FMGetOppositeInterIndex(outsideInteractions[outInterIdx].relativeOutPosition);
kernel->M2L( interCell , &cell, &otherPosition, 1, idxLevel);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInoutPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions,
CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( hipMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( hipMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
hipMemcpyHostToDevice ) );
int* cuSafeInteractions;
FCudaCheck( hipMalloc(&cuSafeInteractions,(nbSafeInteractions+1)*sizeof(int)) );
FCudaCheck( hipMemcpy( cuSafeInteractions, safeInteractions, (nbSafeInteractions+1)*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( FCuda__transferInoutPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, currentCellsPtr, currentCellsSize,
currentCellsDownPtr,
externalCellsPtr, externalCellsSize,
externalCellsUpPtr,
idxLevel, mode,
cuOutsideInteractions, nbOutsideInteractions,
cuSafeInteractions, nbSafeInteractions,
kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
FCudaCheck(hipFree(cuOutsideInteractions));
FCudaCheck(hipFree(cuSafeInteractions));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Downward Pass
/////////////////////////////////////////////////////////////////////////////////////
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__downardPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize,nullptr,currentCellsDownPtr);
CellContainerClass subCellGroup(childCellsPtr, childCellsSize,nullptr,childCellsDownPtr);
const MortonIndex firstParent = FCudaMax(currentCells.getStartingIndex(), subCellGroup.getStartingIndex()>>3);
const MortonIndex lastParent = FCudaMin(currentCells.getEndingIndex()-1, (subCellGroup.getEndingIndex()-1)>>3);
int idxParentCell = currentCells.getCellIndex(firstParent);
int idxChildCell = subCellGroup.getFistChildIdx(firstParent);
while(true){
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(idxParentCell);
typename CellContainerClass::CompleteCellClass child[8];
for(int idxChild = 0 ; idxChild < 8 ; ++idxChild){
child[idxChild].symb = nullptr;
}
do{
const int idxChild = ((subCellGroup.getCellMortonIndex(idxChildCell)) & 7);
child[idxChild] = subCellGroup.getDownCell(idxChildCell);
idxChildCell += 1;
}while(idxChildCell != subCellGroup.getNumberOfCellsInBlock() && cell.symb->mortonIndex == (subCellGroup.getCellMortonIndex(idxChildCell)>>3));
kernel->L2L(cell, child, idxLevel);
if(currentCells.getCellMortonIndex(idxParentCell) == lastParent){
break;
}
idxParentCell += 1;
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__downardPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__downardPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream,
currentCellsPtr, currentCellsSize, currentCellsDownPtr, childCellsPtr, childCellsSize, childCellsDownPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Direct Pass MPI
/////////////////////////////////////////////////////////////////////////////////////
#ifdef SCALFMM_USE_MPI
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInoutPassPerformMpi(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
ParticleContainerGroupClass containersOther(externalContainersPtr, externalContainersSize, nullptr);
for(int leafIdx = blockIdx.x ; leafIdx < counterOuterCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeOuterInteractions[leafIdx] ; outInterIdx < safeOuterInteractions[leafIdx+1] ; ++outInterIdx){
const int leafPos = containersOther.getLeafIndex(outsideInteractions[outInterIdx].outIndex);
if(leafPos != -1){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(leafPos);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].insideIdxInBlock);
kernel->P2PRemote( FCudaTreeCoordinate::GetPositionFromMorton(outsideInteractions[outInterIdx].insideIndex, treeHeight-1),
&particles, &particles , &interParticles, &outsideInteractions[outInterIdx].relativeOutPosition, 1);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInoutPassCallbackMpi(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( hipMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( hipMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
hipMemcpyHostToDevice ) );
int* cuSafeOuterInteractions;
FCudaCheck( hipMalloc(&cuSafeOuterInteractions,(counterOuterCell+1)*sizeof(int)) );
FCudaCheck( hipMemcpy( cuSafeOuterInteractions, safeOuterInteractions, (counterOuterCell+1)*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( FCuda__directInoutPassPerformMpi
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, containersPtr, containersSize, containersDownPtr,
externalContainersPtr, externalContainersSize,
cuOutsideInteractions, nbOutsideInteractions, cuSafeOuterInteractions, counterOuterCell,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
FCudaCheck(hipFree(cuOutsideInteractions));
FCudaCheck(hipFree(cuSafeOuterInteractions));
}
#endif
/////////////////////////////////////////////////////////////////////////////////////
/// Direct Pass
/////////////////////////////////////////////////////////////////////////////////////
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInPassPerform(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
const MortonIndex blockStartIdx = containers.getStartingIndex();
const MortonIndex blockEndIdx = containers.getEndingIndex();
for(int leafIdx = blockIdx.x ; leafIdx < containers.getNumberOfLeavesInBlock() ; leafIdx += gridDim.x){
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(leafIdx);
const MortonIndex mindex = containers.getLeafMortonIndex(leafIdx);
MortonIndex interactionsIndexes[26];
int interactionsPosition[26];
const int3 coord = FCudaTreeCoordinate::GetPositionFromMorton(mindex, treeHeight-1);
int counter = FCudaTreeCoordinate::GetNeighborsIndexes(coord, treeHeight,interactionsIndexes,interactionsPosition);
ParticleGroupClass interactionsObjects[26];
int counterExistingCell = 0;
for(int idxInter = 0 ; idxInter < counter ; ++idxInter){
if( blockStartIdx <= interactionsIndexes[idxInter] && interactionsIndexes[idxInter] < blockEndIdx ){
const int leafPos = containers.getLeafIndex(interactionsIndexes[idxInter]);
if(leafPos != -1){
interactionsObjects[counterExistingCell] = containers.template getLeaf<ParticleGroupClass>(leafPos);
interactionsPosition[counterExistingCell] = interactionsPosition[idxInter];
counterExistingCell += 1;
}
}
}
kernel->P2P( coord, &particles, &particles , interactionsObjects, interactionsPosition, counterExistingCell);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInPassCallback(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__directInPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, containersPtr, containersSize, containersDownPtr,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInoutPassPerform(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
ParticleContainerGroupClass containersOther(externalContainersPtr, externalContainersSize, externalContainersDownPtr);
for(int leafIdx = blockIdx.x ; leafIdx < counterOuterCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeOuterInteractions[leafIdx] ; outInterIdx < safeOuterInteractions[leafIdx+1] ; ++outInterIdx){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].outsideIdxInBlock);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(containersOther.getLeafMortonIndex(outsideInteractions[outInterIdx].outsideIdxInBlock) == outsideInteractions[outInterIdx].outIndex);
FCudaAssertLF(containers.getLeafMortonIndex(outsideInteractions[outInterIdx].insideIdxInBlock) == outsideInteractions[outInterIdx].insideIndex);
kernel->P2POuter( FCudaTreeCoordinate::GetPositionFromMorton(outsideInteractions[outInterIdx].insideIndex, treeHeight-1),
&particles , &interParticles, &outsideInteractions[outInterIdx].relativeOutPosition, 1);
}
}
for(int leafIdx = blockIdx.x ; leafIdx < counterInnerCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeInnterInteractions[leafIdx] ; outInterIdx < safeInnterInteractions[leafIdx+1] ; ++outInterIdx){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(insideInteractions[outInterIdx].outsideIdxInBlock);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(insideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(containersOther.getLeafMortonIndex(insideInteractions[outInterIdx].outsideIdxInBlock) == insideInteractions[outInterIdx].outIndex);
FCudaAssertLF(containers.getLeafMortonIndex(insideInteractions[outInterIdx].insideIdxInBlock) == insideInteractions[outInterIdx].insideIndex);
const int otherPosition = FMGetOppositeNeighIndex(insideInteractions[outInterIdx].relativeOutPosition);
kernel->P2POuter( FCudaTreeCoordinate::GetPositionFromMorton(insideInteractions[outInterIdx].outIndex, treeHeight-1),
&interParticles , &particles, &otherPosition, 1);
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInoutPassCallback(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( hipMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( hipMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
hipMemcpyHostToDevice ) );
OutOfBlockInteraction* cuInsideInteractions;
FCudaCheck( hipMalloc(&cuInsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( hipMemcpy( cuInsideInteractions, insideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
hipMemcpyHostToDevice ) );
int* cuSafeOuterInteractions;
FCudaCheck( hipMalloc(&cuSafeOuterInteractions,(counterOuterCell+1)*sizeof(int)) );
FCudaCheck( hipMemcpy( cuSafeOuterInteractions, safeOuterInteractions, (counterOuterCell+1)*sizeof(int),
hipMemcpyHostToDevice ) );
int* cuSafeInnterInteractions;
FCudaCheck( hipMalloc(&cuSafeInnterInteractions,(counterInnerCell+1)*sizeof(int)) );
FCudaCheck( hipMemcpy( cuSafeInnterInteractions, safeInnterInteractions, (counterInnerCell+1)*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( FCuda__directInoutPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, containersPtr, containersSize,containersDownPtr,
externalContainersPtr, externalContainersSize,externalContainersDownPtr,
cuOutsideInteractions, nbOutsideInteractions,
cuSafeOuterInteractions,counterOuterCell,
cuInsideInteractions,
cuSafeInnterInteractions , counterInnerCell,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
FCudaCheck(hipFree(cuOutsideInteractions));
FCudaCheck(hipFree(cuInsideInteractions));
FCudaCheck(hipFree(cuSafeOuterInteractions));
FCudaCheck(hipFree(cuSafeInnterInteractions));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Merge Pass
/////////////////////////////////////////////////////////////////////////////////////
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__mergePassPerform(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
CudaKernelClass* kernel){
CellContainerClass leafCells(leafCellsPtr,leafCellsSize, nullptr, leafCellsDownPtr);
ParticleContainerGroupClass containers(containersPtr,containersSize, containersDownPtr);
for(int cellIdx = blockIdx.x ; cellIdx < leafCells.getNumberOfCellsInBlock() ; cellIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = leafCells.getDownCell(cellIdx);
FCudaAssertLF(cell.symb->mortonIndex == leafCells.getCellMortonIndex(cellIdx));
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(cellIdx);
FCudaAssertLF(leafCells.getCellMortonIndex(cellIdx) == containers.getLeafMortonIndex(cellIdx));
kernel->L2P(cell, &particles);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__mergePassCallback(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
CudaKernelClass* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
hipLaunchKernelGGL(( FCuda__mergePassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>)
, dim3(inGridSize), dim3(inBlocksSize), 0, currentStream, leafCellsPtr, leafCellsSize,leafCellsDownPtr,
containersPtr, containersSize,containersDownPtr,
kernel);
FCudaCheckAfterCall();
FCudaCheck(hipStreamSynchronize(currentStream));
}
template <class CudaKernelClass>
CudaKernelClass* FCuda__BuildCudaKernel(void* kernel){
return CudaKernelClass::InitKernelKernel(kernel);
}
template <class CudaKernelClass>
void FCuda__ReleaseCudaKernel(CudaKernelClass* cukernel){
CudaKernelClass::ReleaseKernel(cukernel);
}
template <class CudaKernelClass>
dim3 FCuda__GetGridSize(CudaKernelClass* /*kernel*/, int intervalSize){
return CudaKernelClass::GetGridSize(intervalSize);
}
template <class CudaKernelClass>
dim3 FCuda__GetBlockSize(CudaKernelClass* /*kernel*/){
return CudaKernelClass::GetBlocksSize();
}
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "FCudaGroupOfCells.hpp"
#include "FCudaGroupAttachedLeaf.hpp"
#include "FCudaGroupOfParticles.hpp"
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "FCudaEmptyKernel.hpp"
#include "FCudaEmptyCellSymb.hpp"
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaEmptyKernel<int>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaEmptyKernel<int>* FCuda__BuildCudaKernel< FCudaEmptyKernel<int> >(void* kernel);
template void FCuda__ReleaseCudaKernel< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* cukernel);
template dim3 FCuda__GetGridSize< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../TestKernel/FCudaTestKernels.hpp"
#include "../TestKernel/FTestCellPOD.hpp"
template void FCuda__bottomPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FTestCudaKernels<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FTestCudaKernels<float>* FCuda__BuildCudaKernel<FTestCudaKernels<float>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FTestCudaKernels<float>>(FTestCudaKernels<float>* cukernel);
template dim3 FCuda__GetGridSize< FTestCudaKernels<float> >(FTestCudaKernels<float>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FTestCudaKernels<float> >(FTestCudaKernels<float>* cukernel);
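// Double-precision instantiations of the same test-kernel callbacks.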
template void FCuda__bottomPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FTestCudaKernels<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FTestCudaKernels<double>* FCuda__BuildCudaKernel<FTestCudaKernels<double>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FTestCudaKernels<double>>(FTestCudaKernels<double>* cukernel);
template dim3 FCuda__GetGridSize< FTestCudaKernels<double> >(FTestCudaKernels<double>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FTestCudaKernels<double> >(FTestCudaKernels<double>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../P2P/FCudaP2P.hpp"
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaP2P<float>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaP2P<float>* FCuda__BuildCudaKernel<FCudaP2P<float>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FCudaP2P<float>>(FCudaP2P<float>* cukernel);
template dim3 FCuda__GetGridSize< FCudaP2P<float> >(FCudaP2P<float>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaP2P<float> >(FCudaP2P<float>* cukernel);
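// Double-precision counterparts of the P2P instantiations above.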
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaP2P<double>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaP2P<double>* FCuda__BuildCudaKernel<FCudaP2P<double>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FCudaP2P<double>>(FCudaP2P<double>* cukernel);
template dim3 FCuda__GetGridSize< FCudaP2P<double> >(FCudaP2P<double>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaP2P<double> >(FCudaP2P<double>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../Uniform/FUnifCuda.hpp"
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<float,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<float,5>* FCuda__BuildCudaKernel<FUnifCuda<float,5>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<float,5>>(FUnifCuda<float,5>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<float,5> >(FUnifCuda<float,5>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<float,5> >(FUnifCuda<float,5>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<double,5>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<double,5>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<double,5>* FCuda__BuildCudaKernel<FUnifCuda<double,5>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<double,5>>(FUnifCuda<double,5>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<double,5> >(FUnifCuda<double,5>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<double,5> >(FUnifCuda<double,5>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<float,5>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<float,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<float,7>* FCuda__BuildCudaKernel<FUnifCuda<float,7>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<float,7>>(FUnifCuda<float,7>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<float,7> >(FUnifCuda<float,7>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<float,7> >(FUnifCuda<float,7>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<double,7>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<double,7>* kernel, hipStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<double,7>* FCuda__BuildCudaKernel<FUnifCuda<double,7>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<double,7>>(FUnifCuda<double,7>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<double,7> >(FUnifCuda<double,7>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<double,7> >(FUnifCuda<double,7>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<float,7>& hostData);
| 3e62b3afec90d479dc92e4a40207cc482893e37d.cu | #include "FCudaDeviceWrapper.hpp"
#include "FCudaTreeCoordinate.hpp"
#include "FCudaStructParams.hpp"
#define FMGetOppositeNeighIndex(index) (27-(index)-1)
#define FMGetOppositeInterIndex(index) (343-(index)-1)
#define FCudaMax(x,y) ((x)<(y) ? (y) : (x))
#define FCudaMin(x,y) ((x)>(y) ? (y) : (x))
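// The two "opposite" macros convert an interaction index seen from one cell into the
// index of the same interaction seen from the other cell: direct neighbours are numbered
// over the 3^3 = 27 relative positions and M2L interactions over the 7^3 = 343 relative
// positions, so the mirrored index is simply (count - index - 1).
//
// Bottom pass (P2M): the leaves of the cell group are distributed over the CUDA blocks
// (leafIdx starts at blockIdx.x and is strided by gridDim.x); for each leaf the user
// kernel's P2M builds the multipole expansion from the particles attached to that leaf.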
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__bottomPassPerform(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
CudaKernelClass* kernel){
CellContainerClass leafCells(leafCellsPtr, leafCellsSize, leafCellsUpPtr, nullptr);
ParticleContainerGroupClass containers(containersPtr, containersSize, nullptr);
for(int leafIdx = blockIdx.x ; leafIdx < leafCells.getNumberOfCellsInBlock() ; leafIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = leafCells.getUpCell(leafIdx);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(leafIdx);
FCudaAssertLF(leafCells.getCellMortonIndex(leafIdx) == containers.getLeafMortonIndex(leafIdx));
kernel->P2M(cell, &particles);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__bottomPassCallback(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__bottomPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>
(leafCellsPtr, leafCellsSize,leafCellsUpPtr,
containersPtr, containersSize,
kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Upward Pass
/////////////////////////////////////////////////////////////////////////////////////
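// Upward pass (M2M): the parent group and the child group are mapped onto containers,
// the overlapping Morton range [firstParent, lastParent] is computed, and for every
// parent cell the (up to 8) children present in the child group are gathered by their
// Morton suffix (index & 7) before calling the kernel's M2M at the current level.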
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__upwardPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize,currentCellsUpPtr,nullptr);
CellContainerClass subCellGroup(childCellsPtr, childCellsSize,childCellsUpPtr,nullptr);
const MortonIndex firstParent = FCudaMax(currentCells.getStartingIndex(), subCellGroup.getStartingIndex()>>3);
const MortonIndex lastParent = FCudaMin(currentCells.getEndingIndex()-1, (subCellGroup.getEndingIndex()-1)>>3);
int idxParentCell = currentCells.getCellIndex(firstParent);
int idxChildCell = subCellGroup.getFistChildIdx(firstParent);
while(true){
typename CellContainerClass::CompleteCellClass cell = currentCells.getUpCell(idxParentCell);
typename CellContainerClass::CompleteCellClass child[8];
for(int idxChild = 0 ; idxChild < 8 ; ++idxChild){
child[idxChild].symb = nullptr;
}
do{
const int idxChild = ((subCellGroup.getCellMortonIndex(idxChildCell)) & 7);
child[idxChild] = subCellGroup.getUpCell(idxChildCell);
idxChildCell += 1;
}while(idxChildCell != subCellGroup.getNumberOfCellsInBlock() && cell.symb->mortonIndex == (subCellGroup.getCellMortonIndex(idxChildCell)>>3));
kernel->M2M(cell, child, idxLevel);
if(currentCells.getCellMortonIndex(idxParentCell) == lastParent){
break;
}
idxParentCell += 1;
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__upwardPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__upwardPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>
(currentCellsPtr, currentCellsSize,currentCellsUpPtr,
childCellsPtr, childCellsSize,childCellsUpPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Transfer Pass Mpi
/////////////////////////////////////////////////////////////////////////////////////
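// Transfer pass, MPI flavour: M2L between cells of a local group and cells of a group
// received from another process. The remote block exposes only its multipoles
// (externalCellsUpPtr) and the local block only its local expansions
// (currentCellsDownPtr), so the contribution is applied in one direction only.
// The host callback copies the interaction lists to the device, launches the kernel on
// the given stream, synchronizes, and frees the temporary buffers.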
#ifdef SCALFMM_USE_MPI
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInoutPassPerformMpi(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
const int* safeInteractions, int nbSafeInteractions, int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, nullptr, currentCellsDownPtr);
CellContainerClass cellsOther(externalCellsPtr, externalCellsSize, externalCellsUpPtr, nullptr);
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
const int cellPos = cellsOther.getCellIndex(outsideInteractions[outInterIdx].outIndex);
if(cellPos != -1){
typename CellContainerClass::CompleteCellClass interCell = cellsOther.getUpCell(cellPos);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
kernel->M2L( cell , &interCell, &outsideInteractions[outInterIdx].relativeOutPosition, 1, idxLevel);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInoutPassCallbackMpi(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( cudaMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( cudaMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
cudaMemcpyHostToDevice ) );
int* cuSafeInteractions;
FCudaCheck( cudaMalloc(&cuSafeInteractions,(nbSafeInteractions+1)*sizeof(int)) );
FCudaCheck( cudaMemcpy( cuSafeInteractions, safeInteractions, (nbSafeInteractions+1)*sizeof(int),
cudaMemcpyHostToDevice ) );
FCuda__transferInoutPassPerformMpi
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(currentCellsPtr, currentCellsSize, currentCellsDownPtr,
externalCellsPtr, externalCellsSize, externalCellsUpPtr,
cuSafeInteractions, nbSafeInteractions, idxLevel, cuOutsideInteractions, nbOutsideInteractions, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
FCudaCheck(cudaFree(cuSafeInteractions));
FCudaCheck(cudaFree(cuOutsideInteractions));
}
#endif
/////////////////////////////////////////////////////////////////////////////////////
/// Transfer Pass
/////////////////////////////////////////////////////////////////////////////////////
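// Transfer pass inside a single group: for each cell the 189 possible M2L interaction
// indexes are generated from its tree coordinate, the ones that fall inside the same
// block and actually exist are collected, and a single batched M2L call handles them all
// at the current level.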
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, currentCellsUpPtr, currentCellsDownPtr);
const MortonIndex blockStartIdx = currentCells.getStartingIndex();
const MortonIndex blockEndIdx = currentCells.getEndingIndex();
for(int cellIdx = blockIdx.x ; cellIdx < currentCells.getNumberOfCellsInBlock() ; cellIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(cellIdx);
MortonIndex interactionsIndexes[189];
int interactionsPosition[189];
const int3 coord = (FCudaTreeCoordinate::ConvertCoordinate(cell.symb->coordinates));
int counter = FCudaTreeCoordinate::GetInteractionNeighbors(coord, idxLevel,interactionsIndexes,interactionsPosition);
typename CellContainerClass::CompleteCellClass interactions[189];
int counterExistingCell = 0;
for(int idxInter = 0 ; idxInter < counter ; ++idxInter){
if( blockStartIdx <= interactionsIndexes[idxInter] && interactionsIndexes[idxInter] < blockEndIdx ){
const int cellPos = currentCells.getCellIndex(interactionsIndexes[idxInter]);
if(cellPos != -1){
typename CellContainerClass::CompleteCellClass interCell = currentCells.getUpCell(cellPos);
interactions[counterExistingCell] = interCell;
interactionsPosition[counterExistingCell] = interactionsPosition[idxInter];
counterExistingCell += 1;
}
}
}
kernel->M2L( cell , interactions, interactionsPosition, counterExistingCell, idxLevel);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__transferInPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(currentCellsPtr, currentCellsSize,
currentCellsUpPtr, currentCellsDownPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
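// Transfer pass between two cell groups: "mode" selects the direction. With mode == 1
// the current group's cells (mapped with their "down" buffer) receive M2L contributions
// from the external group's multipoles; otherwise the same interaction list is applied
// in the opposite direction, with the cell indexed as "outside" being the one updated
// and the relative position mirrored through FMGetOppositeInterIndex, so each side of
// the interaction is eventually accounted for.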
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__transferInoutPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize, nullptr, currentCellsDownPtr);
CellContainerClass cellsOther(externalCellsPtr, externalCellsSize, externalCellsUpPtr, nullptr);
if(mode == 1){
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
typename CellContainerClass::CompleteCellClass interCell = cellsOther.getUpCell(outsideInteractions[outInterIdx].outsideIdxInBlock);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
kernel->M2L( cell , &interCell, &outsideInteractions[outInterIdx].relativeOutPosition, 1, idxLevel);
}
}
}
else{
for(int cellIdx = blockIdx.x ; cellIdx < nbSafeInteractions ; cellIdx += gridDim.x){
for(int outInterIdx = safeInteractions[cellIdx] ; outInterIdx < safeInteractions[cellIdx+1] ; ++outInterIdx){
typename CellContainerClass::CompleteCellClass cell = cellsOther.getUpCell(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(cell.symb->mortonIndex == outsideInteractions[outInterIdx].insideIndex);
typename CellContainerClass::CompleteCellClass interCell = currentCells.getDownCell(outsideInteractions[outInterIdx].outsideIdxInBlock);
FCudaAssertLF(interCell.symb->mortonIndex == outsideInteractions[outInterIdx].outIndex);
const int otherPosition = FMGetOppositeInterIndex(outsideInteractions[outInterIdx].relativeOutPosition);
kernel->M2L( interCell , &cell, &otherPosition, 1, idxLevel);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__transferInoutPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions,
CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( cudaMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( cudaMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
cudaMemcpyHostToDevice ) );
int* cuSafeInteractions;
FCudaCheck( cudaMalloc(&cuSafeInteractions,(nbSafeInteractions+1)*sizeof(int)) );
FCudaCheck( cudaMemcpy( cuSafeInteractions, safeInteractions, (nbSafeInteractions+1)*sizeof(int),
cudaMemcpyHostToDevice ) );
FCuda__transferInoutPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(currentCellsPtr, currentCellsSize,
currentCellsDownPtr,
externalCellsPtr, externalCellsSize,
externalCellsUpPtr,
idxLevel, mode,
cuOutsideInteractions, nbOutsideInteractions,
cuSafeInteractions, nbSafeInteractions,
kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
FCudaCheck(cudaFree(cuOutsideInteractions));
FCudaCheck(cudaFree(cuSafeInteractions));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Downard Pass
/////////////////////////////////////////////////////////////////////////////////////
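// Downward pass (L2L): mirror image of the upward pass, but only the "down" (local
// expansion) parts of the parent and child groups are mapped; every parent scatters its
// local expansion to the (up to 8) children found in the child group.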
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__downardPassPerform(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, CudaKernelClass* kernel){
CellContainerClass currentCells(currentCellsPtr, currentCellsSize,nullptr,currentCellsDownPtr);
CellContainerClass subCellGroup(childCellsPtr, childCellsSize,nullptr,childCellsDownPtr);
const MortonIndex firstParent = FCudaMax(currentCells.getStartingIndex(), subCellGroup.getStartingIndex()>>3);
const MortonIndex lastParent = FCudaMin(currentCells.getEndingIndex()-1, (subCellGroup.getEndingIndex()-1)>>3);
int idxParentCell = currentCells.getCellIndex(firstParent);
int idxChildCell = subCellGroup.getFistChildIdx(firstParent);
while(true){
typename CellContainerClass::CompleteCellClass cell = currentCells.getDownCell(idxParentCell);
typename CellContainerClass::CompleteCellClass child[8];
for(int idxChild = 0 ; idxChild < 8 ; ++idxChild){
child[idxChild].symb = nullptr;
}
do{
const int idxChild = ((subCellGroup.getCellMortonIndex(idxChildCell)) & 7);
child[idxChild] = subCellGroup.getDownCell(idxChildCell);
idxChildCell += 1;
}while(idxChildCell != subCellGroup.getNumberOfCellsInBlock() && cell.symb->mortonIndex == (subCellGroup.getCellMortonIndex(idxChildCell)>>3));
kernel->L2L(cell, child, idxLevel);
if(currentCells.getCellMortonIndex(idxParentCell) == lastParent){
break;
}
idxParentCell += 1;
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__downardPassCallback(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__downardPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>
(currentCellsPtr, currentCellsSize, currentCellsDownPtr, childCellsPtr, childCellsSize, childCellsDownPtr,
idxLevel, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Direct Pass MPI
/////////////////////////////////////////////////////////////////////////////////////
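// Direct pass, MPI flavour: P2P between local leaves and leaves received from another
// process. The external containers are mapped without a "down" buffer, so only the
// local particles accumulate contributions (kernel->P2PRemote).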
#ifdef SCALFMM_USE_MPI
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInoutPassPerformMpi(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
ParticleContainerGroupClass containersOther(externalContainersPtr, externalContainersSize, nullptr);
for(int leafIdx = blockIdx.x ; leafIdx < counterOuterCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeOuterInteractions[leafIdx] ; outInterIdx < safeOuterInteractions[leafIdx+1] ; ++outInterIdx){
const int leafPos = containersOther.getLeafIndex(outsideInteractions[outInterIdx].outIndex);
if(leafPos != -1){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(leafPos);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].insideIdxInBlock);
kernel->P2PRemote( FCudaTreeCoordinate::GetPositionFromMorton(outsideInteractions[outInterIdx].insideIndex, treeHeight-1),
&particles, &particles , &interParticles, &outsideInteractions[outInterIdx].relativeOutPosition, 1);
}
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInoutPassCallbackMpi(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( cudaMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( cudaMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
cudaMemcpyHostToDevice ) );
int* cuSafeOuterInteractions;
FCudaCheck( cudaMalloc(&cuSafeOuterInteractions,(counterOuterCell+1)*sizeof(int)) );
FCudaCheck( cudaMemcpy( cuSafeOuterInteractions, safeOuterInteractions, (counterOuterCell+1)*sizeof(int),
cudaMemcpyHostToDevice ) );
FCuda__directInoutPassPerformMpi
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(containersPtr, containersSize, containersDownPtr,
externalContainersPtr, externalContainersSize,
cuOutsideInteractions, nbOutsideInteractions, cuSafeOuterInteractions, counterOuterCell,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
FCudaCheck(cudaFree(cuOutsideInteractions));
FCudaCheck(cudaFree(cuSafeOuterInteractions));
}
#endif
/////////////////////////////////////////////////////////////////////////////////////
/// Direct Pass
/////////////////////////////////////////////////////////////////////////////////////
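// Direct pass inside a single group: for each leaf the 26 possible neighbour indexes are
// generated from its Morton index, the neighbours that belong to the same block are
// gathered, and one P2P call handles the leaf against itself and against the neighbours
// found in the block.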
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInPassPerform(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
const MortonIndex blockStartIdx = containers.getStartingIndex();
const MortonIndex blockEndIdx = containers.getEndingIndex();
for(int leafIdx = blockIdx.x ; leafIdx < containers.getNumberOfLeavesInBlock() ; leafIdx += gridDim.x){
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(leafIdx);
const MortonIndex mindex = containers.getLeafMortonIndex(leafIdx);
MortonIndex interactionsIndexes[26];
int interactionsPosition[26];
const int3 coord = FCudaTreeCoordinate::GetPositionFromMorton(mindex, treeHeight-1);
int counter = FCudaTreeCoordinate::GetNeighborsIndexes(coord, treeHeight,interactionsIndexes,interactionsPosition);
ParticleGroupClass interactionsObjects[26];
int counterExistingCell = 0;
for(int idxInter = 0 ; idxInter < counter ; ++idxInter){
if( blockStartIdx <= interactionsIndexes[idxInter] && interactionsIndexes[idxInter] < blockEndIdx ){
const int leafPos = containers.getLeafIndex(interactionsIndexes[idxInter]);
if(leafPos != -1){
interactionsObjects[counterExistingCell] = containers.template getLeaf<ParticleGroupClass>(leafPos);
interactionsPosition[counterExistingCell] = interactionsPosition[idxInter];
counterExistingCell += 1;
}
}
}
kernel->P2P( coord, &particles, &particles , interactionsObjects, interactionsPosition, counterExistingCell);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInPassCallback(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__directInPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(containersPtr, containersSize, containersDownPtr,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
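// Direct pass between two particle groups: the "outer" interaction list updates the
// current group's leaves against the external group, then the "inner" list updates the
// external group's leaves with the neighbour position mirrored by
// FMGetOppositeNeighIndex, so both groups receive their side of the interaction through
// P2POuter.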
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__directInoutPassPerform(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, CudaKernelClass* kernel){
ParticleContainerGroupClass containers(containersPtr, containersSize, containersDownPtr);
ParticleContainerGroupClass containersOther(externalContainersPtr, externalContainersSize, externalContainersDownPtr);
for(int leafIdx = blockIdx.x ; leafIdx < counterOuterCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeOuterInteractions[leafIdx] ; outInterIdx < safeOuterInteractions[leafIdx+1] ; ++outInterIdx){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].outsideIdxInBlock);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(outsideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(containersOther.getLeafMortonIndex(outsideInteractions[outInterIdx].outsideIdxInBlock) == outsideInteractions[outInterIdx].outIndex);
FCudaAssertLF(containers.getLeafMortonIndex(outsideInteractions[outInterIdx].insideIdxInBlock) == outsideInteractions[outInterIdx].insideIndex);
kernel->P2POuter( FCudaTreeCoordinate::GetPositionFromMorton(outsideInteractions[outInterIdx].insideIndex, treeHeight-1),
&particles , &interParticles, &outsideInteractions[outInterIdx].relativeOutPosition, 1);
}
}
for(int leafIdx = blockIdx.x ; leafIdx < counterInnerCell ; leafIdx += gridDim.x){
for(int outInterIdx = safeInnterInteractions[leafIdx] ; outInterIdx < safeInnterInteractions[leafIdx+1] ; ++outInterIdx){
ParticleGroupClass interParticles = containersOther.template getLeaf<ParticleGroupClass>(insideInteractions[outInterIdx].outsideIdxInBlock);
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(insideInteractions[outInterIdx].insideIdxInBlock);
FCudaAssertLF(containersOther.getLeafMortonIndex(insideInteractions[outInterIdx].outsideIdxInBlock) == insideInteractions[outInterIdx].outIndex);
FCudaAssertLF(containers.getLeafMortonIndex(insideInteractions[outInterIdx].insideIdxInBlock) == insideInteractions[outInterIdx].insideIndex);
const int otherPosition = FMGetOppositeNeighIndex(insideInteractions[outInterIdx].relativeOutPosition);
kernel->P2POuter( FCudaTreeCoordinate::GetPositionFromMorton(insideInteractions[outInterIdx].outIndex, treeHeight-1),
&interParticles , &particles, &otherPosition, 1);
}
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__directInoutPassCallback(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
OutOfBlockInteraction* cuOutsideInteractions;
FCudaCheck( cudaMalloc(&cuOutsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( cudaMemcpy( cuOutsideInteractions, outsideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
cudaMemcpyHostToDevice ) );
OutOfBlockInteraction* cuInsideInteractions;
FCudaCheck( cudaMalloc(&cuInsideInteractions,nbOutsideInteractions*sizeof(OutOfBlockInteraction)) );
FCudaCheck( cudaMemcpy( cuInsideInteractions, insideInteractions, nbOutsideInteractions*sizeof(OutOfBlockInteraction),
cudaMemcpyHostToDevice ) );
int* cuSafeOuterInteractions;
FCudaCheck( cudaMalloc(&cuSafeOuterInteractions,(counterOuterCell+1)*sizeof(int)) );
FCudaCheck( cudaMemcpy( cuSafeOuterInteractions, safeOuterInteractions, (counterOuterCell+1)*sizeof(int),
cudaMemcpyHostToDevice ) );
int* cuSafeInnterInteractions;
FCudaCheck( cudaMalloc(&cuSafeInnterInteractions,(counterInnerCell+1)*sizeof(int)) );
FCudaCheck( cudaMemcpy( cuSafeInnterInteractions, safeInnterInteractions, (counterInnerCell+1)*sizeof(int),
cudaMemcpyHostToDevice ) );
FCuda__directInoutPassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(containersPtr, containersSize,containersDownPtr,
externalContainersPtr, externalContainersSize,externalContainersDownPtr,
cuOutsideInteractions, nbOutsideInteractions,
cuSafeOuterInteractions,counterOuterCell,
cuInsideInteractions,
cuSafeInnterInteractions , counterInnerCell,
treeHeight, kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
FCudaCheck(cudaFree(cuOutsideInteractions));
FCudaCheck(cudaFree(cuInsideInteractions));
FCudaCheck(cudaFree(cuSafeOuterInteractions));
FCudaCheck(cudaFree(cuSafeInnterInteractions));
}
/////////////////////////////////////////////////////////////////////////////////////
/// Merge Pass
/////////////////////////////////////////////////////////////////////////////////////
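// Merge pass (L2P): the leaf-level cell group and the particle group are walked together
// (their Morton indexes must match leaf by leaf) and each leaf's local expansion is
// evaluated onto its own particles.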
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__global__ void FCuda__mergePassPerform(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
CudaKernelClass* kernel){
CellContainerClass leafCells(leafCellsPtr,leafCellsSize, nullptr, leafCellsDownPtr);
ParticleContainerGroupClass containers(containersPtr,containersSize, containersDownPtr);
for(int cellIdx = blockIdx.x ; cellIdx < leafCells.getNumberOfCellsInBlock() ; cellIdx += gridDim.x){
typename CellContainerClass::CompleteCellClass cell = leafCells.getDownCell(cellIdx);
FCudaAssertLF(cell.symb->mortonIndex == leafCells.getCellMortonIndex(cellIdx));
ParticleGroupClass particles = containers.template getLeaf<ParticleGroupClass>(cellIdx);
FCudaAssertLF(leafCells.getCellMortonIndex(cellIdx) == containers.getLeafMortonIndex(cellIdx));
kernel->L2P(cell, &particles);
}
}
template <class SymboleCellClass, class PoleCellClass, class LocalCellClass,
class CellContainerClass, class ParticleContainerGroupClass, class ParticleGroupClass, class CudaKernelClass>
__host__ void FCuda__mergePassCallback(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
CudaKernelClass* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize){
FCuda__mergePassPerform
<SymboleCellClass, PoleCellClass, LocalCellClass,
CellContainerClass, ParticleContainerGroupClass, ParticleGroupClass, CudaKernelClass>
<<<inGridSize, inBlocksSize, 0, currentStream>>>(leafCellsPtr, leafCellsSize,leafCellsDownPtr,
containersPtr, containersSize,containersDownPtr,
kernel);
FCudaCheckAfterCall();
FCudaCheck(cudaStreamSynchronize(currentStream));
}
template <class CudaKernelClass>
CudaKernelClass* FCuda__BuildCudaKernel(void* kernel){
return CudaKernelClass::InitKernelKernel(kernel);
}
template <class CudaKernelClass>
void FCuda__ReleaseCudaKernel(CudaKernelClass* cukernel){
CudaKernelClass::ReleaseKernel(cukernel);
}
template <class CudaKernelClass>
dim3 FCuda__GetGridSize(CudaKernelClass* /*kernel*/, int intervalSize){
return CudaKernelClass::GetGridSize(intervalSize);
}
template <class CudaKernelClass>
dim3 FCuda__GetBlockSize(CudaKernelClass* /*kernel*/){
return CudaKernelClass::GetBlocksSize();
}
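// The helpers above wrap device-side kernel construction/release and the launch geometry
// queried by the host. The remainder of the file pulls in the concrete container and
// kernel headers and explicitly instantiates the callbacks for each kernel/cell/particle
// combination used by the library.
//
// A possible host-side call sequence (illustrative sketch only, not taken from the
// library's actual call sites; names such as hostKernel and stream are placeholders):
//
//   CudaKernelClass* cuKernel = FCuda__BuildCudaKernel<CudaKernelClass>(hostKernel);
//   const dim3 grid   = FCuda__GetGridSize(cuKernel, intervalSize);
//   const dim3 blocks = FCuda__GetBlockSize(cuKernel);
//   FCuda__bottomPassCallback<SymboleCellClass, PoleCellClass, LocalCellClass,
//                             CellContainerClass, ParticleContainerGroupClass,
//                             ParticleGroupClass, CudaKernelClass>
//       (leafCellsPtr, leafCellsSize, leafCellsUpPtr,
//        containersPtr, containersSize,
//        cuKernel, stream, grid, blocks);
//   FCuda__ReleaseCudaKernel(cuKernel);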
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "FCudaGroupOfCells.hpp"
#include "FCudaGroupAttachedLeaf.hpp"
#include "FCudaGroupOfParticles.hpp"
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "FCudaEmptyKernel.hpp"
#include "FCudaEmptyCellSymb.hpp"
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell,
const int treeHeight, FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<int,0,0,int>, FCudaGroupAttachedLeaf<int,0,0,int>, FCudaEmptyKernel<int> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaEmptyKernel<int>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaEmptyKernel<int>* FCuda__BuildCudaKernel< FCudaEmptyKernel<int> >(void* kernel);
template void FCuda__ReleaseCudaKernel< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* cukernel);
template dim3 FCuda__GetGridSize< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaEmptyKernel<int> >(FCudaEmptyKernel<int>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../TestKernel/FCudaTestKernels.hpp"
#include "../TestKernel/FTestCellPOD.hpp"
template void FCuda__bottomPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<float,0, 1, long long int>, FCudaGroupAttachedLeaf<float,0, 1, long long int>, FTestCudaKernels<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FTestCudaKernels<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FTestCudaKernels<float>* FCuda__BuildCudaKernel<FTestCudaKernels<float>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FTestCudaKernels<float>>(FTestCudaKernels<float>* cukernel);
template dim3 FCuda__GetGridSize< FTestCudaKernels<float> >(FTestCudaKernels<float>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FTestCudaKernels<float> >(FTestCudaKernels<float>* cukernel);
template void FCuda__bottomPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FTestCellPODCore, FTestCellPODData, FTestCellPODData, FCudaGroupOfCells<FTestCellPODCore, FTestCellPODData, FTestCellPODData>,
FCudaGroupOfParticles<double,0, 1, long long int>, FCudaGroupAttachedLeaf<double,0, 1, long long int>, FTestCudaKernels<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FTestCudaKernels<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FTestCudaKernels<double>* FCuda__BuildCudaKernel<FTestCudaKernels<double>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FTestCudaKernels<double>>(FTestCudaKernels<double>* cukernel);
template dim3 FCuda__GetGridSize< FTestCudaKernels<double> >(FTestCudaKernels<double>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FTestCudaKernels<double> >(FTestCudaKernels<double>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../P2P/FCudaP2P.hpp"
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FCudaP2P<float> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaP2P<float>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaP2P<float>* FCuda__BuildCudaKernel<FCudaP2P<float>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FCudaP2P<float>>(FCudaP2P<float>* cukernel);
template dim3 FCuda__GetGridSize< FCudaP2P<float> >(FCudaP2P<float>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaP2P<float> >(FCudaP2P<float>* cukernel);
template void FCuda__bottomPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FCudaEmptyCellSymb, int, int, FCudaGroupOfCells<FCudaEmptyCellSymb, int, int>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FCudaP2P<double> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FCudaP2P<double>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FCudaP2P<double>* FCuda__BuildCudaKernel<FCudaP2P<double>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FCudaP2P<double>>(FCudaP2P<double>* cukernel);
template dim3 FCuda__GetGridSize< FCudaP2P<double> >(FCudaP2P<double>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FCudaP2P<double> >(FCudaP2P<double>* cukernel);
/////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////
#include "../Uniform/FUnifCuda.hpp"
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,5>,FCudaUnifCellPODLocal<float,5>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<float,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<float,5>* FCuda__BuildCudaKernel<FUnifCuda<float,5>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<float,5>>(FUnifCuda<float,5>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<float,5> >(FUnifCuda<float,5>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<float,5> >(FUnifCuda<float,5>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<double,5>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,5>,FCudaUnifCellPODLocal<double,5>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,5> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<double,5>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<double,5>* FCuda__BuildCudaKernel<FUnifCuda<double,5>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<double,5>>(FUnifCuda<double,5>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<double,5> >(FUnifCuda<double,5>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<double,5> >(FUnifCuda<double,5>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<float,5>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<float,7>,FCudaUnifCellPODLocal<float,7>>,
FCudaGroupOfParticles<float,1, 4, float>, FCudaGroupAttachedLeaf<float,1, 4, float>, FUnifCuda<float,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<float,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<float,7>* FCuda__BuildCudaKernel<FUnifCuda<float,7>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<float,7>>(FUnifCuda<float,7>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<float,7> >(FUnifCuda<float,7>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<float,7> >(FUnifCuda<float,7>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<double,7>& hostData);
template void FCuda__bottomPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsUpPtr,
unsigned char* containersPtr, std::size_t containersSize,
FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__upwardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsUpPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsUpPtr,
int idxLevel, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__transferInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize, unsigned char* externalCellsUpPtr,
int idxLevel, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__transferInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsUpPtr, unsigned char* currentCellsDownPtr,
int idxLevel, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__transferInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize,
unsigned char* currentCellsDownPtr,
unsigned char* externalCellsPtr, std::size_t externalCellsSize,
unsigned char* externalCellsUpPtr,
int idxLevel, int mode, const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions,
const int* safeInteractions, int nbSafeInteractions, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__downardPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* currentCellsPtr, std::size_t currentCellsSize, unsigned char* currentCellsDownPtr,
unsigned char* childCellsPtr, std::size_t childCellsSize, unsigned char* childCellsDownPtr,
int idxLevel, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#ifdef SCALFMM_USE_MPI
template void FCuda__directInoutPassCallbackMpi<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize,
const OutOfBlockInteraction* outsideInteractions,
int nbOutsideInteractions, const int safeOuterInteractions[], const int counterOuterCell,
const int treeHeight, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
#endif
template void FCuda__directInPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
const int treeHeight, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__directInoutPassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
unsigned char* externalContainersPtr, std::size_t externalContainersSize, unsigned char* externalContainersDownPtr,
const OutOfBlockInteraction* outsideInteractions, int nbOutsideInteractions,
const int safeOuterInteractions[], const int counterOuterCell,
const OutOfBlockInteraction* insideInteractions,
const int safeInnterInteractions[], const int counterInnerCell, const int treeHeight, FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template void FCuda__mergePassCallback<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>, FCudaGroupOfCells<FBasicCellPOD, FCudaUnifCellPODPole<double,7>,FCudaUnifCellPODLocal<double,7>>,
FCudaGroupOfParticles<double,1, 4, double>, FCudaGroupAttachedLeaf<double,1, 4, double>, FUnifCuda<double,7> >
(unsigned char* leafCellsPtr, std::size_t leafCellsSize, unsigned char* leafCellsDownPtr,
unsigned char* containersPtr, std::size_t containersSize, unsigned char* containersDownPtr,
FUnifCuda<double,7>* kernel, cudaStream_t currentStream,
const dim3 inGridSize, const dim3 inBlocksSize);
template FUnifCuda<double,7>* FCuda__BuildCudaKernel<FUnifCuda<double,7>>(void* kernel);
template void FCuda__ReleaseCudaKernel<FUnifCuda<double,7>>(FUnifCuda<double,7>* cukernel);
template dim3 FCuda__GetGridSize< FUnifCuda<double,7> >(FUnifCuda<double,7>* kernel, int intervalSize);
template dim3 FCuda__GetBlockSize< FUnifCuda<double,7> >(FUnifCuda<double,7>* cukernel);
template void FUnifCudaFillObject(void* cudaKernel, const FUnifCudaSharedData<float,7>& hostData);
f60bcb525dfadf2dd5260145cca568d39fc017ad.hip
#include "hip/hip_runtime.h"
/*****************************************************************************************
bestfit.c
Iterate over all floating parameters, at each step adjusting just one parameter x in order
to minimize objective(x), the objective function (reduced chi-square plus penalties).
Continue until the fractional reduction in objective(x) due to a full pass through the
parameter list is less than term_prec. Return the final value of the objective function.
__________________________________________________________________________________________
Modified 2016 July 7 by Matt Engels:
Adapted for use in shape-cuda.
------------------------------------------------------------------------------------------
Modified 2014 February 19 by CM:
Allow for multiple optical scattering laws when setting the "vary_hapke" flag
Modified 2013 July 14 by CM:
Implement the "term_maxiter" parameter
Modified 2012 July 5 by MCN and CM:
Use the gethostname function rather than the HOST environment variable to get root's
hostname
List root's PID in addition to the hostname
List the PID for each branch node, not just the hostname
Modified 2012 June 13 by CM:
Implement "objfunc_start" parameter
Modified 2012 March 23 by CM:
Implement Doppler scaling -- more particularly, simultaneous adjustment of shape/spin
parameters and Doppler scale factors via the "vary_dopscale" parameter
Modified 2010 April 12 by CM:
Bug fix: When fitting a size, shape, or spin parameter with the
"vary_delcor0" parameter being used, call realize_delcor to reset
the 0th-order delay correction polynomial coefficients to their
saved values before calling vary_params. (For infinitely fine
model resolution and delay-Doppler resolution this wouldn't
matter but in practice it does.)
Modified 2009 November 15 by CM:
Fix printf statement with too many arguments
Modified 2009 July 5 by CM:
Add "npar_update" parameter rather than hard-wiring an update (rewrite
mod and obs files and display reduced chi2 and penalty functions)
every 20th parameter adjustment
Modified 2009 April 3 by CM:
If the model has illegal properties (e.g., negative ellipsoid diameters)
then, for each type of problem, multiply the objective function not
only by the "bad_objfactor" parameter but also by an additional
factor that increases as the problem gets worse. The
"baddiam_logfactor" "badphoto_logfactor" "posbnd_logfactor"
"badposet_logfactor" and "badradar_logfactor" parameters are the
logarithms of the additional factors for the five possible problem
types; the calc_fits routine computes the logarithms rather than the
factors themselves so as to avoid floating-point overflow.
Revise MPI_CALC so that root receives the "posbnd_logfactor" parameter
from each branch node rather than the "posbnd" parameter:
posbnd_logfactor > 0.0 if the model extends beyond the POS frame
for any of the branch node's datasets. If root sees that this
value is > 0.0, it will set its "posbnd" flag and will increase the
objective function accordingly.
Revise MPI_CALC so that root receives the "badposet_logfactor"
parameter from each branch node: badposet_logfactor > 0.0 if the
model extends beyond the fit frame for any of the branch node's
plane-of-sky datasets. If root sees that this value is > 0.0, it
will set its "badposet" flag and will increase the objective
function accordingly.
Revise MPI_CALC so that root receives the "badradar_logfactor"
parameter from each branch node: badradar_logfactor > 0.0 if the
model is too wide in delay-Doppler space for the program to
construct some or all (delay-)Doppler fit frames. If root sees
that this value is > 0.0, it will set its "badradar" flag and will
increase the objective function accordingly.
For MPI_Recv calls, mpi_par[0] is no longer equal to the MPI action,
since the message tag argument already serves that purpose (as of
2008 April 10) -- so the other mpi_par elements are renumbered
Modified 2008 August 10 by CM:
Never terminate the fit at the end of a partial iteration -- that is,
after the first iteration of a fit where first_fitpar > 0
Modified 2008 July 11 by CM:
Display the hostname even for single-processor fits
Modified 2008 April 10 by CM:
For parallel-processing fits, display the hostname for each node
Use message tag argument to MPI_Recv to identify the MPI action
Modified 2007 August 29 by CM:
Implement the "avoid_badpos" parameter: if this parameter is turned on
and the model extends beyond the POS frame and it is time to fit a
size parameter, start by shrinking that size parameter until the
model fits within the POS frame
Implement the "bad_objfactor" parameter in routine objective: multiply
the objective function by this factor for illegal photometric
parameters, for tiny or negative ellipsoid diameters, and for
models that extend beyond the plane-of-sky frame. (Previously
this factor was fixed at 2.0.)
Rename MPI_TAG to MPI_TAG_1 to avoid name conflict with mpich headers
Modified 2007 August 16 by CM:
Implement the "term_badmodel" parameter: If this parameter is turned on
and, at the end of any fit iteration, the model ever extends beyond
the POS frame OR has any illegal photometric parameters OR has any
tiny or negative ellipsoid diameters, the fit is terminated.
Modified 2007 August 10 by CM:
Eliminate unused variables
Modified 2006 December 20 by CM:
Revise MPI_CALC so that root receives the "posbnd" parameter from each
branch node, so that the objective function can be doubled if the
model extends beyond the plane-of-sky frame for any datasets
If the model extends beyond the plane-of-sky frame for any trial value
of a parameter, evaluate the model for the best-fit parameter value
to check whether or not it extends beyond the POS frame
Modified 2006 October 1 by CM:
Add two new arguments to realize_delcor
Add three new arguments to realize_photo
Implement "vary_delcor0" "vary_radalb" and "vary_optalb" parameters
Implement SIZEPAR parameters via the "newsize" variable
Modified 2005 June 27 by CM:
Renamed "round" function to "iround" to avoid conflict
Modified 2005 March 17 by CM:
For parallel processing, check that root is receiving the responses
to the correct broadcast
Root no longer needs to compute degrees of freedom or to receive
dof values from branch nodes: Now they are computed in read_dat
Degrees of freedom can now be floating-point rather than integer
Modified 2005 February 28 by CM:
Add screen warnings if objective function has been doubled due to
(a) tiny or negative ellipsoid diameters
(b) illegal photometric parameters
(c) model extending beyond the model POS frame
Initialize the three parameters (baddiam, badphoto, posbnd) that
flag these three problems in other routines (realize_mod,
realize_photo, calc_fits) rather than in objective(x), so that
these three parameters can be used for actions other than "fit"
Rename DATAPAR to be DELCORPAR
Add XYOFFPAR and implement the new realize_xyoff routine
Modified 2005 February 22 by CM:
Move branch nodes' signoff statements from shape.c to here, so that
they can appear in order
Modified 2005 February 13 by CM:
Rename objective function "f(x)" to be "objective(x)"
Only broadcast to branch nodes if there are any branch nodes
(i.e., if mpi_nproc > 1)
Broadcast the new MPI_DUMMYPAR signal to branch nodes before evaluating
objective(0.0), the objective function for the existing model;
this tells each branch node to point hotparam to a dummy variable
rather than to a model parameter, so that the dummy variable will
be set to 0.0 and the model will be unchanged.
Broadcast the new MPI_CALFACT signal to branch nodes to get updated
calibration factors before rewriting the obs file
Root now realizes the model after setting a parameter to its best value
Make sure that root and branch nodes update the model (i.e., that they
call the calc_fits and chi2 routines) before rewriting the mod and
obs files and before calling routine show_deldoplim
Avoid unnecessary model realizations for root by allowing newshape,
newspin, newphoto, and newdelcor to be 0, not always 1 as before
Move MPI_DONE broadcast to here from shape.c
Modified 2005 January 25 by CM:
Eliminated unused variable
Modified 2005 January 10 by CM:
When fitting using parallel processing, ping all of the branch nodes
and inform the user that they're active
Modified 2004 October 29 by CM:
Add "first_fitpar" parameter so that a fit can be started (or resumed)
at some parameter (counting from 0) other than the first parameter
Modified 2004 October 10 by CM:
Fix chi-square display at start of each iteration and at the
end of the fit by calling realize_mod, realize_spin, realize_photo,
realize_delcor, and calc_fits before calling chi2
Modified 2004 August 13 by CM:
Call modified minimum search routine brent_abs rather than brent
so that absolute fitting tolerances can be specified
Modified 2004 May 21 by CM:
Display the final values of the individual penalty functions
Modified 2004 April 3 by CM:
Add the "list_breakdown" argument to routine chi2 so that we can
display the chi2 breakdown by data type (Doppler, delay-Doppler,
POS, lightcurves) at the start of each fit iteration and at
the end of the fit
Modified 2004 February 26 by CM:
realize_photo now takes two arguments rather than one
Modified 2003 April 26 by CM:
Added "show_deldoplim" call at the end of each fit iteration,
to check for overly tight data vignetting
Modified 2003 April 23 by CM:
Implemented '=' state for delay correction polynomial coefficients
via the "realize_delcor" routine
Modified 2003 April 17 by CM:
Added "baddiam" parameter to function f so that the objective
function is doubled if an ellipsoid component has a tiny or
negative diameter
Modified 2003 April 2 by CM:
In function f (which computes reduced-chi-squared-plus-penalties),
moved call to "penalties" from before spar->showstate is set
to after.
Values of reduced chi-squared and of the various penalties are
printed to the screen after every 20th parameter adjustment.
To be precise, they're printed at the very first call to f when
adjusting parameter 21, 41, 61, etc. This call is made within
function bestfit by minimum-bracketing function mnbrak;
it corresponds to the *unadjusted* value of parameter 21 (or 41
or ...), which is what we want.
Until now, the individual penalty values were being printed on the
*second* call to f, also made by mnbrak but with parameter 21
incremented by the relevant initial step size (e.g., length_step).
Hence these printed values were irrelevant and misleadingly large.
Moving the call to "penalties" later in the code fixes the problem.
*****************************************************************************************/
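/* A minimal sketch (illustrative only, not part of shape-cuda) of the outer
   coordinate-descent loop summarized at the top of this file: each pass
   adjusts one floating parameter at a time, and iteration stops once the
   fractional reduction of the objective over a full pass falls below
   term_prec.  The names sketch_objective, sketch_adjust_param, n_params,
   and term_prec below are placeholders assumed for this example, not
   symbols defined elsewhere in this file. */
static double sketch_coordinate_descent(double (*sketch_objective)(void),
                                        void (*sketch_adjust_param)(int),
                                        int n_params, double term_prec)
{
    double before, after = sketch_objective();    /* objective of initial model */
    do {
        before = after;
        for (int p = 0; p < n_params; p++)
            sketch_adjust_param(p);               /* 1-D minimization in parameter p */
        after = sketch_objective();               /* objective after a full pass */
    } while (before > 0.0 && (before - after)/before >= term_prec);
    return after;                                 /* final objective value */
}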
extern "C" {
#include "../shape/head.h"
#include "../shape/shape-cuda.h"
}
static __device__ double *hotparam;
static struct par_t *spar, *sdev_par;
static struct mod_t *smod, *sdev_mod;
static struct dat_t *sdat, *sdev_dat;
static int newsize, newshape, newspin, newphoto, newdelcor, newdopscale, newxyoff,
showvals=0, vary_delcor0_size, vary_delcor0_shapespin, vary_dopscale_spin,
vary_dopscale_sizeshape, vary_alb_size, vary_alb_shapespin, vary_hapke,
call_vary_params, check_posbnd, check_badposet, check_badradar;
static double deldop_zmax, deldop_zmax_save, cos_subradarlat, cos_subradarlat_save,
rad_xsec, rad_xsec_save, opt_brightness, opt_brightness_save, baddiam_factor,
badphoto_factor, posbnd_factor, badposet_factor, badradar_factor,
baddopscale_factor;
static unsigned char type;
static double hotparamval;
double objective_cuda(double x);
__host__ double objective_cuda_streams(double x, struct vertices_t **verts,
unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews,
int *lc_n, int nsets, int nf, hipStream_t *bf_stream);
__device__ double bf_hotparamval, bf_dummyval;
__device__ int bf_partype;
__global__ void bf_get_flags_krnl(struct par_t *dpar, unsigned char *flags) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
}
}
__global__ void ocs_get_flags_krnl(struct par_t *dpar, unsigned char *flags,
double *dlogfactors) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
dlogfactors[0] = dpar->bad_objfactor;
dlogfactors[1] = dpar->baddiam_logfactor;
dlogfactors[2] = dpar->badphoto_logfactor;
dlogfactors[3] = dpar->posbnd_logfactor;
dlogfactors[4] = dpar->badposet_logfactor;
dlogfactors[5] = dpar->badradar_logfactor;
dlogfactors[6] = dpar->baddopscale_logfactor;
}
}
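/* Usage sketch (an assumption for illustration, not code taken from
   shape-cuda): these single-thread "getter" kernels let the host read a
   handful of fields out of the device-resident par_t without copying the
   whole structure back.  A typical call pattern is shown below; d_flags is
   a small device buffer such as the seven-byte "flags" array allocated in
   bestfit_CUDA further down, h_flags is a matching host buffer, and error
   checking is omitted for brevity. */
__host__ static void sketch_read_par_flags(struct par_t *dpar,
                                           unsigned char *d_flags,
                                           unsigned char h_flags[7])
{
    /* fill d_flags[0..5] on the device ... */
    hipLaunchKernelGGL(bf_get_flags_krnl, dim3(1), dim3(1), 0, 0, dpar, d_flags);
    /* ... then pull the six flag bytes back to the host */
    hipMemcpy(h_flags, d_flags, 6 * sizeof(unsigned char), hipMemcpyDeviceToHost);
}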
__global__ void bf_set_hotparam_initial_krnl() {
/* Single-threaded kernel */
if (threadIdx.x == 0)
hotparam = &bf_dummyval;
}
__global__ void bf_set_hotparam_pntr_krnl(double **fpntr,
int *fpartype, int p) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
hotparam = fpntr[p]; /* This is pointing at a device variable */
bf_partype = fpartype[p]; /* parameter type */
}
}
__global__ void bf_get_hotparam_val_krnl() {
/* Single threaded kernel */
if (threadIdx.x == 0)
bf_hotparamval = *hotparam;
}
__global__ void bf_mult_hotparam_val_krnl(double factor) {
/* Single-threaded kernel */
if (threadIdx.x == 0)
*hotparam *= factor;
}
__global__ void bf_set_hotparam_val_krnl(double newvalue) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
*hotparam = newvalue;
bf_hotparamval = newvalue;
}
}
__global__ void set_verts_shortcut_krnl(struct mod_t *dmod, struct vertices_t **verts) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
verts[0] = &dmod->shape.comp[0].real;
}
}
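/* The kernels above implement a small protocol around the file-scope device
   pointer 'hotparam': the host points it at one entry of the device-side
   parameter list (bf_set_hotparam_pntr_krnl) and can then read, scale, or
   overwrite the value it refers to without knowing its device address.
   One such host-side step is sketched below; the helper and its argument
   names are illustrative assumptions, not code used later in this file. */
__host__ static void sketch_select_and_scale_param(double **fpntr_dev,
                                                   int *fpartype_dev,
                                                   int p, double factor)
{
    /* alias fit parameter p as hotparam on the device ... */
    hipLaunchKernelGGL(bf_set_hotparam_pntr_krnl, dim3(1), dim3(1), 0, 0,
                       fpntr_dev, fpartype_dev, p);
    /* ... and rescale its current value in place */
    hipLaunchKernelGGL(bf_mult_hotparam_val_krnl, dim3(1), dim3(1), 0, 0, factor);
    hipDeviceSynchronize();
}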
__host__ double bestfit_CUDA(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, struct par_t *par, struct mod_t *mod,
struct dat_t *dat)
{
char hostname[MAXLEN], dofstring[MAXLEN];
int i, iter=0, p, cntr, first_fitpar, partype, keep_iterating=1, ilaw;
long pid_long;
pid_t pid;
double beginerr, enderr, ax, bx, cx, obja, objb, objc, xmin,
final_chi2, final_redchi2, dummyval2, dummyval3, dummyval4,
delta_delcor0, dopscale_factor, radalb_factor, optalb_factor;
unsigned char *flags;
dim3 THD, BLK;
/* Get the hostname of host machine and the PID */
(void) gethostname(hostname, MAXLEN-1);
pid = getpid();
pid_long = (long) pid; /* Assumes pid_t fits in a long */
printf("#\n# CUDA fit (pid %ld on %s)\n", pid_long, hostname);
fflush(stdout);
gpuErrchk(hipMalloc((void**)&sdev_par, sizeof(struct par_t)));
gpuErrchk(hipMemcpy(sdev_par, par, sizeof(struct par_t), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&sdev_mod, sizeof(struct mod_t)));
gpuErrchk(hipMemcpy(sdev_mod, mod, sizeof(struct mod_t), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&sdev_dat, sizeof(struct dat_t)));
gpuErrchk(hipMemcpy(sdev_dat, dat, sizeof(struct dat_t), hipMemcpyHostToDevice));
cudaCalloc1((void**)&flags, sizeof(unsigned char), 7);
/* Initialize static global pointers used by objective(x) below
to be compatible with "Numerical Recipes in C" routines */
spar = par; smod = mod; sdat = dat;
sdev_par = dpar; sdev_mod = dmod; sdev_dat = ddat;
/* Initialize static global parameters */
newsize = newshape = newspin = newphoto = newdelcor = newdopscale = newxyoff = 1;
deldop_zmax = deldop_zmax_save = 0.0;
cos_subradarlat = cos_subradarlat_save = 0.0;
rad_xsec = rad_xsec_save = 0.0;
opt_brightness = opt_brightness_save = 0.0;
vary_delcor0_size = (par->vary_delcor0 != VARY_NONE);
vary_delcor0_shapespin = (par->vary_delcor0 == VARY_ALL);
vary_dopscale_spin = (par->vary_dopscale != VARY_NONE);
vary_dopscale_sizeshape = (par->vary_dopscale == VARY_ALL);
vary_alb_size = (par->vary_radalb != VARY_NONE || par->vary_optalb != VARY_NONE);
vary_alb_shapespin = (par->vary_radalb == VARY_ALL || par->vary_optalb == VARY_ALL);
vary_hapke = 0;
if (par->vary_optalb != VARY_NONE)
for (ilaw=0; ilaw<mod->photo.noptlaws; ilaw++)
if (mod->photo.opttype[ilaw] == HAPKE || mod->photo.opttype[ilaw] == HARMHAPKE
|| mod->photo.opttype[ilaw] == INHOHAPKE)
vary_hapke = 1;
call_vary_params = (par->vary_delcor0 != VARY_NONE || par->vary_dopscale != VARY_NONE
|| par->vary_radalb != VARY_NONE
|| par->vary_optalb != VARY_NONE);
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
type = mod->shape.comp[0].type;
/* Allocate memory for pointers, steps, and tolerances */
cudaCalloc1((void**)&fparstep, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fpartol, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fparabstol, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fpartype, sizeof(int), par->nfpar);
cudaCalloc1((void**)&fpntr, sizeof(double*), par->nfpar);
for (i=0; i<par->nfpar; i++)
cudaCalloc1((void**)&fpntr[i], sizeof(double), 1);
/* The following call sets up the parameter lists allocated above */
mkparlist_cuda(dpar, dmod, ddat, fparstep, fpartol,
fparabstol, fpartype, fpntr);
/* Compute deldop_zmax_save, cos_subradarlat_save, rad_xsec_save, and
* opt_brightness_save for the initial model */
if (call_vary_params)
{
realize_mod_cuda(dpar, dmod, type);
if (AF) realize_spin_cuda_af(dpar, dmod, ddat, dat->nsets);
else if (STREAMS) realize_spin_cuda_streams(dpar, dmod, ddat, dat->nsets);
else realize_spin_cuda(dpar, dmod, ddat, dat->nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
/* realize_delcor and realize_dopscale were called by read_dat */
if (AF)
vary_params_af(dpar, dmod, ddat, par->action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
else if (STREAMS)
// vary_params_cuda_streams(dpar, dmod, ddat, par->action, &deldop_zmax_save,
// &rad_xsec_save, &opt_brightness_save,
// &cos_subradarlat_save, dat->nsets);
vary_params_cuda_streams2(dpar, dmod, ddat, par->action,
&deldop_zmax_save, &rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
else
vary_params_cuda(dpar, dmod, ddat, par->action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
}
printf("rad_xsec: %f\n", rad_xsec_save);
printf("deldop_zmax: %f\n", (float)deldop_zmax_save);
/* Point hotparam to a dummy variable (dummyval) rather than to a model pa-
* rameter; then call objective(0.0) to set dummy variable = 0.0, realize
* the initial model, calculate the fits, return initial model's objective
* function as enderr. */
hipLaunchKernelGGL(( bf_set_hotparam_initial_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_set_hotparam_initial_krnl");
enderr = objective_cuda(0.0);
printf("#\n# searching for best fit ...\n");
printf("%4d %8.6f to begin", 0, enderr);
/* Launch single-thread kernel to retrieve flags in dev_par */
/* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;*/
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Now act on the flags just retrieved from dev_par */
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Display the region within each delay-Doppler or Doppler frame that, ac-
* cording to initial model, has nonzero power. A warning is displayed if
* any region extends beyond the data limits: the vignetting is too tight,
* or else some model parameter (such as a delay correction polynomial co-
* efficient) is seriously in error. */
show_deldoplim_cuda(dat, ddat);
/* Set the starting fit parameter for the first iteration only */
first_fitpar = par->first_fitpar;
if (first_fitpar < 0 || first_fitpar >= par->nfpar) {
printf("ERROR: need 0 <= first_fitpar < nparams (%d)\n", par->nfpar);
bailout("bestfit.c\n");
}
/* Iteratively adjust model; for each iteration, step through all free pa-
* rameters, adjusting one parameter at a time so as to minimize the objec-
* tive function at each step. Stop when fractional decrease in the objec-
* tive function from one iteration to the next is less than term_prec. */
// do {
showvals = 1; /* show reduced chi-square and penalties at beginning */
beginerr = enderr;
printf("# iteration %d %f", ++iter, beginerr);
/* Launch single-thread kernel to retrieve flags in dev_par */
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Now act on the flags just retrieved from dev_par */
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Show breakdown of chi-square by data type */
if (AF)
chi2_cuda_af(dpar, ddat, 1, dat->nsets);
else if (STREAMS)
chi2_cuda_streams(dpar, ddat, 1, dat->nsets);
else
chi2_cuda(dpar, ddat, 1);
/* Loop through the free parameters */
cntr = first_fitpar % par->npar_update;
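/* NOTE: the loop bound below is currently clamped to 1 (the intended bound,
 * par->nfpar, is left in the adjacent comment), so only the first free
 * parameter is adjusted on this pass. */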
for (p=first_fitpar; p<1/*par->nfpar*/; p++) {
/* Adjust only parameter p on this try */
hipLaunchKernelGGL(( bf_set_hotparam_pntr_krnl), dim3(1),dim3(1), 0, 0, fpntr, fpartype, p);
checkErrorAfterKernelLaunch("bf_set_hotparam_pntr_krnl");
gpuErrchk(hipMemcpyFromSymbol(&partype, bf_partype, sizeof(int),
0, hipMemcpyDeviceToHost));
newsize = newshape = newspin = newphoto = newdelcor = newdopscale
= newxyoff = 0;
if (partype == SIZEPAR) newsize = 1;
else if (partype == SHAPEPAR) newshape = 1;
else if (partype == SPINPAR) newspin = 1;
else if (partype == PHOTOPAR) newphoto = 1;
else if (partype == DELCORPAR) newdelcor = 1;
else if (partype == DOPSCALEPAR) newdopscale = 1;
else if (partype == XYOFFPAR) newxyoff = 1;
/* If this is a size parameter AND model extends beyond POS frame
* AND the "avoid_badpos" parameter is turned on, shrink model by
* 5% at a time until it fits within the POS frame.
* We must start with the redundant model evaluation for the un-
* changed value of the size parameter, in case the first call to
* objective displays reduced chi-square and the penalty functions. */
if (par->avoid_badpos && partype == SIZEPAR) {
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Get value of (*hotparam) */
hipLaunchKernelGGL(( bf_get_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, hipMemcpyDeviceToHost));
while (flags[2]) {
objective_cuda(hotparamval);
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
if (flags[2]) {
/* Set the value pointed to by hotparam to 0.95 of its
* previous value */
hipLaunchKernelGGL(( bf_mult_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, 0.95);
checkErrorAfterKernelLaunch("bf_mult_hotparam_val_krnl");
}
}
}
/* Get value of (*hotparam) so that mnbrak can use it*/
hipLaunchKernelGGL(( bf_get_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, hipMemcpyDeviceToHost));
/* Use Numerical Recipes routine mnbrak to bracket a minimum in the
* objective function (reduced chi-square plus penalties) objec-
* tive(x), where x is the value of parameter p. As initial trial
* parameter values, use ax (unadjusted value) and bx, that value
* incremented by the appropriate step size (length_step,spin_step,
* etc.). mnbrak returns 3 parameter values, with bx between ax
* and cx; note that ax and bx are changed from their input values.
* It also returns the 3 corresponding objective(x) values, where
* objb is less than obja and objc. Hence there is at least one
* local minimum (but not necessarily *any* global minimum)
* somewhere between ax and cx. */
ax = hotparamval;
bx = ax + par->fparstep[p]; /* par usage is fine here */
mnbrak( &ax, &bx, &cx, &obja, &objb, &objc, objective_cuda);
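/* Purely illustrative example: for a size parameter with ax = 1.00 and a
 * step of 0.05, mnbrak is entered with bx = 1.05 and might return
 * (ax, bx, cx) = (1.00, 1.08, 1.21) with objb < obja and objb < objc; that
 * ordering is what brent_abs relies on below to home in on the minimum. */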
/* Before homing in on local minimum, initialize flags that will
* tell us if model extended beyond POS frame (sky rendering) for
* any trial parameter value(s), if it extended beyond any POS ima-
* ges, and if it was too wide in delay-Doppler space */
check_posbnd = 0;
check_badposet = 0;
check_badradar = 0;
/* Now use Numerical Recipes function brent to find local minimum -
* that is, to find xmin, the best value of x, to within the
* *fractional* tolerance specified for parameter p (length_tol,
* spin_tol, etc.). brent's return value is the minimized objective
* function, objective(xmin). If more than one local minimum bet-
* ween ax and cx, brent might not find the best one. brent_abs is
* a modified version of brent that has an absolute fitting tole-
* rance as one of its arguments, in addition to the existing
* fractional tolerance. */
enderr = brent_abs( ax, bx, cx, objective_cuda,
par->fpartol[p], par->fparabstol[p], &xmin);
/* Realize whichever part(s) of the model has changed.
*
* The code here is somewhat opaque because more than one part of
* the model may have changed - if the "vary_delcor0" "vary_radalb"
* and/or "vary_optalb" parameter is being used to permit joint pa-
* rameter adjustments. Before calling the vary_params routine, the
* size/shape and spin states must be realized (realize_mod and
* realize_spin); if albedos are being varied jointly with other
* parameters, the photometric state must also be realized
* (realize_photo); and in either case the 0th-order delay correc-
* tion polynomial coefficients must be reset to their saved
* values via the appropriate call to realize_delcor. */
/* Set the value pointed to by hotparam to xmin, the best parameter value
* found by brent_abs: (*hotparam) = xmin; */
hipLaunchKernelGGL(( bf_set_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, xmin);
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl");
gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, hipMemcpyDeviceToHost));
if (newsize || newshape)
realize_mod_cuda(dpar, dmod, type);
if (newspin) {
if (AF) realize_spin_cuda_af(dpar, dmod, ddat, dat->nsets);
else if (STREAMS) realize_spin_cuda_streams(dpar, dmod, ddat, dat->nsets);
else realize_spin_cuda(dpar, dmod, ddat, dat->nsets);
}
if ((newsize && vary_alb_size) || ((newshape ||
newspin) && vary_alb_shapespin))
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin)
&& vary_delcor0_shapespin))
realize_delcor_cuda(ddat, 0.0, 1, dat->nsets); /* set delcor0 to delcor0_save */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape)
&& vary_dopscale_sizeshape))
realize_dopscale_cuda(dpar, ddat, 1.0, 1); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the adjustments to 0th-order delay
* correction polynomial coefficients, to Doppler scaling fac-
* tors, and to radar and optical albedos */
if (AF)
vary_params_af(dpar,dmod,ddat, 11, &deldop_zmax,&rad_xsec,
&opt_brightness,&cos_subradarlat, dat->nsets);
else //11 - this used to be MPI_SETPAR_VARY
vary_params_cuda(dpar,dmod,ddat,11,&deldop_zmax,&rad_xsec,
&opt_brightness, &cos_subradarlat, dat->nsets);
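/* The factors computed next rescale jointly-varied parameters: the change in
 * deldop_zmax (km) is converted to an equivalent 0th-order delay-correction
 * shift in microseconds via KM2US, and the Doppler-scale and albedo factors
 * are ratios of the saved quantities to the newly computed ones. */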
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) &&
vary_alb_shapespin)) {
realize_photo_cuda(dpar, dmod, radalb_factor, optalb_factor, 2); /* reset R, then R_save */
/* Must update opt_brightness_save for Hapke optical scattering
* law, since single-scattering albedo w isn't just an overall
* scaling factor */
if (vary_hapke) {
if (AF)
vary_params_af(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
&opt_brightness_save,&dummyval4, dat->nsets);
else // used to be MPI_SETPAR_HAPKE
vary_params_cuda(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
&opt_brightness_save, &dummyval4, dat->nsets);
}
} else if (newphoto) {
rad_xsec_save = rad_xsec;
opt_brightness_save = opt_brightness;
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
}
if ((newsize && vary_delcor0_size) || ((newshape || newspin) &&
vary_delcor0_shapespin)) {
deldop_zmax_save = deldop_zmax;
realize_delcor_cuda(ddat, delta_delcor0, 2, dat->nsets); /* reset delcor0, then delcor0_save */
} else if (newdelcor) {
realize_delcor_cuda(ddat, 0.0, 0, dat->nsets); /* set delcor0_save to delcor0 */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) &&
vary_dopscale_sizeshape)) {
cos_subradarlat_save = cos_subradarlat;
realize_dopscale_cuda(dpar, ddat, dopscale_factor, 2); /* reset dopscale, then dopscale_save */
} else if (newdopscale) {
realize_dopscale_cuda(dpar, ddat, 1.0, 0); /* set dopscale_save to dopscale */
}
if (newxyoff)
realize_xyoff_cuda(ddat);
/* If the model extended beyond POS frame (sky rendering) for any
* trial parameter value(s), if it extended beyond any plane-of-
* sky fit frames, or if it was too wide in delay-Doppler space,
* evaluate model for best-fit parameter value to check if these
* problems persist - that is, to update "posbnd" "badposet" and
* "badradar" parameters for updated model.
* (This needn't be done for "baddiam" "badphoto" flags: if we've
* just finished adjusting an ellipsoid dimension or photometric
* parameter, realize_mod or realize_photo was called in code block
* above in order to realize the changed portion of model, and that
* call updated corresponding flag. Also we needn't worry about the
* "baddopscale" flag, since realize_dopscale was called above if
* Doppler scaling factors were changed.) The call to objective
* (*hotparam) first sets *hotparam (the parameter that we just
* adjusted) equal to itself (i.e., no change) and then calls
* calc_fits to evaluate the model for all datasets. */
if (check_posbnd || check_badposet || check_badradar)
objective_cuda(hotparamval);//(*hotparam);
/* Launch single-thread kernel to retrieve flags in dev_par */
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Display the objective function after each parameter adjustment. */
printf("%4d %8.6f %d", p, enderr, iround(par->fpartype[p]));
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)");
printf("\n");
fflush(stdout);
/* Display reduced chi-square and individual penalty values after
* every 20th parameter adjustment. Setting showvals to 1 here
* means that these things will be displayed next time objective(x)
* is evaluated - at start of NEXT parameter adjustment. Specifi-
* cally, they will be displayed when routine mnbrak evaluates
* objective(x) for *unadjusted* parameter value ax (see comment
* above).
* Also rewrite model and obs files after every 20th parameter
* adjustment. Most of obs file doesn't change, but some floating
* parameters (i.e. delay correction polynomial coefficients) do. */
if (++cntr >= par->npar_update) {
cntr = 0;
showvals = 1;
if (AF) {
calc_fits_cuda_af(dpar, dmod, ddat);
chi2_cuda_af(dpar, ddat, 0, dat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(dpar, dmod, ddat);
chi2_cuda(dpar, ddat, 0);
}
//write_mod( par, mod);
//write_dat( par, dat);
}
}
/* End of this iteration: Write model and data to disk, and display the
* region within each delay-Doppler or Doppler frame for which model
* power is nonzero. */
if (cntr != 0) {
if (AF){
calc_fits_cuda_af(dpar, dmod, ddat);
chi2_cuda_af(dpar, ddat, 0, dat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(dpar, dmod, ddat);
chi2_cuda(dpar, ddat, 0);
}
//write_mod( par, mod);
//write_dat( par, dat);
}
show_deldoplim_cuda(dat, ddat);
/* Check if we should start a new iteration */
if (iter == par->term_maxiter) {
/* Just completed last iteration permitted by "term_maxiter" para-
* meter, so stop iterating; note that since iter is 1-based, this
* test is always false if "term_maxiter" = 0 (its default value) */
keep_iterating = 0;
} else if (first_fitpar > 0) {
/* Just completed partial iteration (possible for iteration 1): if
* "objfunc_start" parameter was given, check if fractional decrea-
* se in objective function *relative to objfunc_start* during the
* just-completed iteration was larger than term_prec, thus
* justifying a new iteration; if it wasn't specified, definitely
* proceed to a new iteration. */
if (par->objfunc_start > 0.0)
keep_iterating = ((par->objfunc_start - enderr)/enderr >= par->term_prec);
else
keep_iterating = 1;
first_fitpar = 0; /* for all iterations after the first iteration */
} else if (par->term_badmodel && (flags[0] || flags[1] || flags[2] ||
flags[3] || flags[4] || flags[5]) ) {
/* Just completed a full iteration, stop iterating because "term_
* badmodel" parameter is turned on and model has a fatal flaw: it
* extends beyond POS frame OR it has one or more illegal photometric
* parameters OR it has one or more tiny or negative ellipsoid dia-
* meters OR it has plane-of-sky fit frames too small to "contain"
* model OR it is too wide in delay-Doppler space for (delay-)
* Doppler fit frames to be correctly constructed OR it has out-of-
* range values for one or more Doppler scaling factors */
keep_iterating = 0;
} else {
/* Just completed a full iteration and the model has no fatal flaws
* (or else the "term_badmodel" parameter is turned off): keep
* iterating if the fractional decrease in the objective function during the
* just-completed iteration was greater than term_prec */
keep_iterating = ((beginerr - enderr)/enderr >= par->term_prec);
}
// } while (keep_iterating);
/* Show final values of reduced chi-square, individual penalty functions,
* and the objective function */
if (AF)
final_chi2 = chi2_cuda_af(dpar, ddat, 1, dat->nsets);
else if (STREAMS)
final_chi2 = chi2_cuda_streams(dpar, ddat, 1, dat->nsets);
else
final_chi2 = chi2_cuda(dpar, ddat, 1);
final_redchi2 = final_chi2/dat->dof;
printf("# search completed\n");
/* Launch single-thread kernel to get these final flags from dev->par:
* pen.n, baddiam, badphoto, posbnd, badposet, badradar, baddopscale */
/* Launch single-thread kernel to retrieve flags in dev_par */
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
if (par->pen.n > 0 || flags[0] || flags[1] || flags[2] || flags[3] ||
flags[4] || flags[5]) {
printf("#\n");
printf("# %15s %e\n", "reduced chi2", final_redchi2);
if (par->pen.n > 0) {
par->showstate = 1;
penalties_cuda(dpar, dmod, ddat);
par->showstate = 0;
}
if (flags[0])
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
if (flags[1])
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
if (flags[2])
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
if (flags[3])
printf("# objective func multiplied by %.1f: "
"model extends beyond plane-of-sky fit image\n",
badposet_factor);
if (flags[4])
printf("# objective func multiplied by %.1f: "
"model is too wide in delay-Doppler space to construct fit image\n",
badradar_factor);
if (flags[5])
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
printf("# ----------------------------\n");
printf("# %15s %e\n", "objective func", enderr);
printf("#\n");
}
intifpossible( dofstring, MAXLEN, dat->dof, SMALLVAL, "%f");
printf("# final chi2 = %e for %s dof (reduced chi2 = %f)\n",
final_chi2, dofstring, final_redchi2);
printf("#\n");
fflush(stdout);
hipFree(sdev_par);
hipFree(sdev_mod);
hipFree(sdev_dat);
hipFree(fparstep);
hipFree(fpartol);
hipFree(fparabstol);
hipFree(fpartype);
hipFree(fpntr);
hipFree(flags);
hipDeviceReset();
return enderr;
}
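/* bestfit_CUDA2 is a streams-oriented variant of the driver above: it caches
 * per-set data types, frame counts, and view counts on the host, creates one
 * CUDA stream per frame (re-used for the whole run), and dispatches to the
 * *_streams2 / *_streams3 routine variants. Much of the per-parameter update
 * logic is still commented out below while this path is brought up. */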
__host__ double bestfit_CUDA2(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, struct par_t *par, struct mod_t *mod,
struct dat_t *dat)
{
char hostname[MAXLEN], dofstring[MAXLEN];
int i, iter=0, p, cntr, first_fitpar, partype, keep_iterating=1, ilaw, nf, term_maxiter;
long pid_long;
pid_t pid;
double beginerr, enderr, ax, bx, cx, obja, objb, objc, xmin, final_chi2,
final_redchi2, dummyval2, dummyval3, dummyval4, delta_delcor0,
dopscale_factor, radalb_factor, optalb_factor, *hfparstep, *hfpartol,
*hfparabstol, objfunc_start, term_prec;
unsigned char *flags, *hflags, *htype, *dtype, action, avoid_badpos, term_badmodel;
int nsets, *nframes, *lc_n, *nviews, nfpar, *hfpartype, npar_update, max_frames=0;
struct vertices_t **verts;
dim3 THD, BLK;
/* This section collects parameters used for CUDA kernel launches throughout
* the program. The cudaStreams created here are used/re-used for the
* lifetime of one program run */
nsets = dat->nsets;
nfpar = par->nfpar;
nf = mod->shape.comp[0].real.nf;
action = par->action;
npar_update = par->npar_update;
avoid_badpos = par->avoid_badpos;
objfunc_start = par->objfunc_start;
term_prec = par->term_prec;
term_badmodel = par->term_badmodel;
type = mod->shape.comp[0].type;
htype = (unsigned char *) malloc(nsets*sizeof(unsigned char));
nframes = (int *) malloc(nsets*sizeof(int));
lc_n = (int *) malloc(nsets*sizeof(int));
nviews = (int *) malloc(nsets*sizeof(int));
gpuErrchk(hipMalloc((void**)&dtype, sizeof(unsigned char)*nsets));
gpuErrchk(hipMalloc((void**)&verts, sizeof(struct vertices_t)*2));
for (int s=0; s<nsets; s++) {
htype[s] = dat->set[s].type;
switch (htype[s]) {
case DELAY:
nframes[s] = dat->set[s].desc.deldop.nframes;
nviews[s] = dat->set[s].desc.deldop.nviews;
lc_n[s] = 0;
break;
case DOPPLER:
nframes[s] = dat->set[s].desc.doppler.nframes;
nviews[s] = dat->set[s].desc.doppler.nviews;
lc_n[s] = 0;
break;
case POS:
nframes[s] = dat->set[s].desc.poset.nframes;
nviews[s] = dat->set[s].desc.poset.nviews;
lc_n[s] = 0;
break;
case LGHTCRV:
nframes[s] = dat->set[s].desc.lghtcrv.ncalc;
nviews[s] = dat->set[s].desc.lghtcrv.nviews;
lc_n[s] = dat->set[s].desc.lghtcrv.n;
break;
}
if (nframes[s]>max_frames) max_frames = nframes[s];
}
gpuErrchk(hipMemcpy(dtype, htype, sizeof(unsigned char)*nsets,
hipMemcpyHostToDevice));
hipStream_t bf_stream[max_frames];
for (int f=0; f<max_frames; f++)
hipStreamCreate(&bf_stream[f]);
/*..........................End section..................................*/
/* Get the hostname of host machine and the PID */
(void) gethostname(hostname, MAXLEN-1);
pid = getpid();
pid_long = (long) pid; /* Assumes pid_t fits in a long */
printf("#\n# CUDA fit (pid %ld on %s)\n", pid_long, hostname);
fflush(stdout);
/* Allocate memory for pointers, steps, and tolerances on both host and
* device. fpntr remains a hipMallocManaged allocation because it is a
* double pointer. */
gpuErrchk(hipMalloc((void**)&sdev_par, sizeof(struct par_t)));
gpuErrchk(hipMemcpy(sdev_par, par, sizeof(struct par_t), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&sdev_mod, sizeof(struct mod_t)));
gpuErrchk(hipMemcpy(sdev_mod, mod, sizeof(struct mod_t), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&sdev_dat, sizeof(struct dat_t)));
gpuErrchk(hipMemcpy(sdev_dat, dat, sizeof(struct dat_t), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&flags, sizeof(unsigned char) * 7));
gpuErrchk(hipMalloc((void**)&fparstep, sizeof(double) * nfpar));
gpuErrchk(hipMalloc((void**)&fpartol, sizeof(double) * nfpar));
gpuErrchk(hipMalloc((void**)&fparabstol, sizeof(double) * nfpar));
gpuErrchk(hipMalloc((void**)&fpartype, sizeof(int) * nfpar));
cudaCalloc1((void**)&fpntr, sizeof(double*), nfpar);
hfparstep = (double *) malloc(nfpar*sizeof(double));
hfpartol = (double *) malloc(nfpar*sizeof(double));
hfparabstol = (double *) malloc(nfpar*sizeof(double));
hfpartype = (int *) malloc(nfpar*sizeof(int));
hflags = (unsigned char *) malloc(7*sizeof(unsigned char));
for (i=0; i<nfpar; i++)
gpuErrchk(hipMalloc((void**)&fpntr[i], sizeof(double) * 1));
/* Set vertices shortcut */
hipLaunchKernelGGL(( set_verts_shortcut_krnl), dim3(1),dim3(1), 0, 0, dmod, verts);
checkErrorAfterKernelLaunch("set_verts_shortcut_krnl");
/* Initialize static global pointers used by objective(x) below
to be compatible with "Numerical Recipes in C" routines */
spar = par; smod = mod; sdat = dat;
sdev_par = dpar; sdev_mod = dmod; sdev_dat = ddat;
/* Initialize static global parameters */
newsize = newshape = newspin = newphoto = newdelcor = newdopscale = newxyoff = 1;
deldop_zmax = deldop_zmax_save = 0.0;
cos_subradarlat = cos_subradarlat_save = 0.0;
rad_xsec = rad_xsec_save = 0.0;
opt_brightness = opt_brightness_save = 0.0;
vary_delcor0_size = (par->vary_delcor0 != VARY_NONE);
vary_delcor0_shapespin = (par->vary_delcor0 == VARY_ALL);
vary_dopscale_spin = (par->vary_dopscale != VARY_NONE);
vary_dopscale_sizeshape = (par->vary_dopscale == VARY_ALL);
vary_alb_size = (par->vary_radalb != VARY_NONE || par->vary_optalb != VARY_NONE);
vary_alb_shapespin = (par->vary_radalb == VARY_ALL || par->vary_optalb == VARY_ALL);
vary_hapke = 0;
if (par->vary_optalb != VARY_NONE)
for (ilaw=0; ilaw<mod->photo.noptlaws; ilaw++)
if (mod->photo.opttype[ilaw] == HAPKE || mod->photo.opttype[ilaw] == HARMHAPKE
|| mod->photo.opttype[ilaw] == INHOHAPKE)
vary_hapke = 1;
call_vary_params = (par->vary_delcor0 != VARY_NONE || par->vary_dopscale != VARY_NONE
|| par->vary_radalb != VARY_NONE
|| par->vary_optalb != VARY_NONE);
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* The following call sets up the parameter lists allocated above; the device
* contents are then copied into the host copies below */
mkparlist_cuda2(dpar, dmod, ddat, fparstep, fpartol, fparabstol, fpartype,
fpntr, nfpar, nsets);
gpuErrchk(hipMemcpy(hfparstep, fparstep, sizeof(double)*nfpar, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(hfpartol, fpartol, sizeof(double)*nfpar, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(hfparabstol, fparabstol, sizeof(double)*nfpar, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(hfpartype, fpartype, sizeof(int)*nfpar, hipMemcpyDeviceToHost));
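/* The host copies just made (hfparstep, hfpartol, hfparabstol, hfpartype)
 * are the arrays the host-side bracketing/minimization calls below should
 * index, since fparstep et al. now reside in device memory. */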
/* Compute deldop_zmax_save, cos_subradarlat_save, rad_xsec_save, and
* opt_brightness_save for the initial model */
if (call_vary_params)
{
realize_mod_cuda(dpar, dmod, type);
if (AF) {
realize_spin_cuda_af(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_af(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, nsets);
}
else if (STREAMS) {
realize_spin_cuda_streams(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_cuda_streams2(dpar, dmod, ddat, action,
&deldop_zmax_save, &rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, nsets);
}
else if (STREAMS2){
if (FLOAT)
realize_spin_cuda_streams2f(dpar, dmod, ddat, htype, nframes,
nviews, nsets, bf_stream);
else
realize_spin_cuda_streams2(dpar, dmod, ddat, htype, nframes, nviews,
nsets, bf_stream);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
if (FLOAT)
vary_params_cuda_streams3f(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save, &cos_subradarlat_save,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
else
vary_params_cuda_streams3(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save, &cos_subradarlat_save,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
}
else {
realize_spin_cuda(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_cuda(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save,&opt_brightness_save,&cos_subradarlat_save,
nsets);
}
}
printf("rad_xsec: %f\n", rad_xsec_save);
printf("deldop_zmax: %f\n", (float)deldop_zmax_save);
/* Point hotparam to a dummy variable (dummyval) rather than to a model pa-
* rameter; then call objective(0.0) to set dummy variable = 0.0, realize
* the initial model, calculate the fits, return initial model's objective
* function as enderr. */
hipLaunchKernelGGL(( bf_set_hotparam_initial_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_set_hotparam_initial_krnl");
if (STREAMS2)
enderr = objective_cuda_streams(0.0, verts, htype, dtype, nframes,
nviews, lc_n, nsets, nf, bf_stream);
else
enderr = objective_cuda(0.0);
printf("#\n# searching for best fit ...\n");
printf("%4d %8.6f to begin", 0, enderr);
/* Launch single-thread kernel to retrieve flags in dev_par */
/* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;*/
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
hipMemcpyDeviceToHost));
/* Now act on the flags just retrieved from dev_par */
if (hflags[0]) printf(" (BAD DIAMS)");
if (hflags[1]) printf(" (BAD PHOTO)");
if (hflags[2]) printf(" (BAD POS)");
if (hflags[3]) printf(" (BAD POSET)");
if (hflags[4]) printf(" (BAD RADAR)");
if (hflags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Display the region within each delay-Doppler or Doppler frame that, ac-
* cording to initial model, has nonzero power. A warning is displayed if
* any region extends beyond the data limits: the vignetting is too tight,
* or else some model parameter (such as a delay correction polynomial co-
* efficient) is seriously in error. */
show_deldoplim_cuda_streams(ddat, htype, nsets, nframes, max_frames);
/* Set the starting fit parameter for the first iteration only */
first_fitpar = par->first_fitpar;
term_maxiter = par->term_maxiter;
if (first_fitpar < 0 || first_fitpar >= nfpar) {
printf("ERROR: need 0 <= first_fitpar < nparams (%d)\n", nfpar);
bailout("bestfit.c\n");
}
/* Iteratively adjust model; for each iteration, step through all free pa-
* rameters, adjusting one parameter at a time so as to minimize the objec-
* tive function at each step. Stop when fractional decrease in the objec-
* tive function from one iteration to the next is less than term_prec. */
// do {
showvals = 1; /* show reduced chi-square and penalties at beginning */
beginerr = enderr;
printf("# iteration %d %f", ++iter, beginerr);
/* Launch single-thread kernel to retrieve flags in dev_par */
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
hipMemcpyDeviceToHost));
/* Now act on the flags just retrieved from dev_par */
if (hflags[0]) printf(" (BAD DIAMS)");
if (hflags[1]) printf(" (BAD PHOTO)");
if (hflags[2]) printf(" (BAD POS)");
if (hflags[3]) printf(" (BAD POSET)");
if (hflags[4]) printf(" (BAD RADAR)");
if (hflags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Show breakdown of chi-square by data type */
if (AF)
chi2_cuda_af(dpar, ddat, 1, nsets);
else if (STREAMS)
chi2_cuda_streams(dpar, ddat, 1, nsets);
else if (STREAMS2)
chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes, lc_n, 1,
nsets, bf_stream);
else
chi2_cuda(dpar, ddat, 1);
/* Loop through the free parameters */
cntr = first_fitpar % npar_update;
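/* NOTE: the free-parameter loop is currently disabled (see the commented-out
 * for statement below); this pass adjusts only the single parameter p = 1. */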
p = first_fitpar = 1;
//for (p=first_fitpar; p<nfpar; p++) {
//p = first_fitpar;
/* Adjust only parameter p on this try */
hipLaunchKernelGGL(( bf_set_hotparam_pntr_krnl), dim3(1),dim3(1), 0, 0, fpntr, fpartype, p);
checkErrorAfterKernelLaunch("bf_set_hotparam_pntr_krnl");
gpuErrchk(hipMemcpyFromSymbol(&partype, bf_partype, sizeof(int),
0, hipMemcpyDeviceToHost));
newsize = newshape = newspin = newphoto = newdelcor = newdopscale
= newxyoff = 0;
if (partype == SIZEPAR) newsize = 1;
else if (partype == SHAPEPAR) newshape = 1;
else if (partype == SPINPAR) newspin = 1;
else if (partype == PHOTOPAR) newphoto = 1;
else if (partype == DELCORPAR) newdelcor = 1;
else if (partype == DOPSCALEPAR) newdopscale = 1;
else if (partype == XYOFFPAR) newxyoff = 1;
/* If this is a size parameter AND model extends beyond POS frame
* AND the "avoid_badpos" parameter is turned on, shrink model by
* 5% at a time until it fits within the POS frame.
* We must start with the redundant model evaluation for the un-
* changed value of the size parameter, in case the first call to
* objective displays reduced chi-square and the penalty functions. */
if (avoid_badpos && partype == SIZEPAR) {
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
hipMemcpyDeviceToHost));
/* Get value of (*hotparam) */
hipLaunchKernelGGL(( bf_get_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, hipMemcpyDeviceToHost));
while (hflags[2]) {
if (STREAMS2)
objective_cuda_streams(hotparamval, verts, htype, dtype,
nframes, nviews, lc_n, nsets, nf, bf_stream);
else
objective_cuda(hotparamval);
hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
hipMemcpyDeviceToHost));
if (hflags[2]) {
/* Set the value pointed to by hotparam to 0.95 of its
* previous value */
hipLaunchKernelGGL(( bf_mult_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, 0.95);
checkErrorAfterKernelLaunch("bf_mult_hotparam_val_krnl");
}
}
}
/* Get value of (*hotparam) so that mnbrak can use it*/
hipLaunchKernelGGL(( bf_get_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, );
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, hipMemcpyDeviceToHost));
/* Use Numerical Recipes routine mnbrak to bracket a minimum in the
* objective function (reduced chi-square plus penalties) objec-
* tive(x), where x is the value of parameter p. As initial trial
* parameter values, use ax (unadjusted value) and bx, that value
* incremented by the appropriate step size (length_step,spin_step,
* etc.). mnbrak returns 3 parameter values, with bx between ax
* and cx; note that ax and bx are changed from their input values.
* It also returns the 3 corresponding objective(x) values, where
* objb is less than obja and objc. Hence there is at least one
* local minimum (but not necessarily *any* global minimum)
* somewhere between ax and cx. */
ax = hotparamval;
bx = ax + hfparstep[p]; /* using the host copy of fparstep is fine here */
if (STREAMS2)
mnbrak_streams(&ax, &bx, &cx, &obja, &objb, &objc,
objective_cuda_streams, verts, htype, dtype, nframes,
nviews, lc_n, nsets, nf, bf_stream);
else
mnbrak( &ax, &bx, &cx, &obja, &objb, &objc, objective_cuda);
/* Before homing in on local minimum, initialize flags that will
* tell us if model extended beyond POS frame (sky rendering) for
* any trial parameter value(s), if it extended beyond any POS ima-
* ges, and if it was too wide in delay-Doppler space */
check_posbnd = 0;
check_badposet = 0;
check_badradar = 0;
/* Now use Numerical Recipes function brent to find local minimum -
* that is, to find xmin, the best value of x, to within the
* *fractional* tolerance specified for parameter p (length_tol,
* spin_tol, etc.). brent's return value is the minimized objective
* function, objective(xmin). If more than one local minimum bet-
* ween ax and cx, brent might not find the best one. brent_abs is
* a modified version of brent that has an absolute fitting tole-
* rance as one of its arguments, in addition to the existing
* fractional tolerance. */
if (STREAMS2)
enderr = brent_abs_streams(ax, bx, cx, objective_cuda_streams, hfpartol[p],
hfparabstol[p], &xmin, verts, htype, dtype, nframes, nviews, lc_n,
nsets, nf, bf_stream);
else
enderr = brent_abs( ax, bx, cx, objective_cuda,
hfpartol[p], hfparabstol[p], &xmin);
//
// /* Realize whichever part(s) of the model has changed.
// *
// * The code here is somewhat opaque because more than one part of
// * the model may have changed - if the "vary_delcor0" "vary_radalb"
// * and/or "vary_optalb" parameter is being used to permit joint pa-
// * rameter adjustments. Before calling the vary_params routine, the
// * size/shape and spin states must be realized (realize_mod and
// * realize_spin); if albedos are being varied jointly with other
// * parameters, the photometric state must also be realized
// * (realize_photo); and in either case the 0th-order delay correc-
// * tion polynomial coefficients must be reset to their saved
// * values via the appropriate call to realize_delcor. */
// /* Set the value pointed to by hotparam to 0.95 of its
// * previous value (*hotparam) = xmin; */
// hipLaunchKernelGGL(( bf_set_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, xmin);
// checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl");
// gpuErrchk(hipMemcpyFromSymbol(&hotparamval, bf_hotparamval,
// sizeof(double), 0, hipMemcpyDeviceToHost));
//
// if (newsize || newshape)
// realize_mod_cuda(dpar, dmod, type);
// if (newspin) {
// if (AF)
// realize_spin_cuda_af(dpar, dmod, ddat, nsets);
// else if (STREAMS)
// realize_spin_cuda_streams(dpar, dmod, ddat, nsets);
// else if (STREAMS2)
// realize_spin_cuda_streams2(dpar, dmod, ddat, htype, nframes,
// nviews, nsets, bf_stream);
// else
// realize_spin_cuda(dpar, dmod, ddat, nsets);
// }
// if ((newsize && vary_alb_size) || ((newshape ||
// newspin) && vary_alb_shapespin))
// realize_photo_cuda(dpar, dmod, 1.0, 1.0, 1); /* set R to R_save */
// if ((newsize && vary_delcor0_size) || ((newshape || newspin)
// && vary_delcor0_shapespin)) {
// if (FLOAT)
// realize_delcor_cuda_f(ddat, 0.0, 1, nsets, htype, nframes); /* set delcor0 to delcor0_save */
// else
// realize_delcor_cuda(ddat, 0.0, 1, nsets); /* set delcor0 to delcor0_save */
// }
// if ((newspin && vary_dopscale_spin) || ((newsize || newshape)
// && vary_dopscale_sizeshape))
// realize_dopscale_cuda_streams(dpar, ddat, 1.0, 1, nsets, dtype); /* set dopscale to dopscale_save */
// if (call_vary_params) {
// /* Call vary_params to get the adjustments to 0th-order delay
// * correction polynomial coefficients, to Doppler scaling fac-
// * tors, and to radar and optical albedos */
// if (AF)
// vary_params_af(dpar,dmod,ddat, 11, &deldop_zmax,&rad_xsec,
// &opt_brightness,&cos_subradarlat, nsets);
// if (STREAMS)
// vary_params_cuda_streams2(dpar,dmod,ddat,11, &deldop_zmax,
// &rad_xsec,&opt_brightness,&cos_subradarlat,nsets);
// else if (STREAMS2)
// vary_params_cuda_streams3(dpar,dmod,ddat,11,&deldop_zmax,
// &rad_xsec, &opt_brightness, &cos_subradarlat,
// nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
// bf_stream);
// else //11 - this used to be MPI_SETPAR_VARY
// vary_params_cuda(dpar,dmod,ddat,11,&deldop_zmax,&rad_xsec,
// &opt_brightness, &cos_subradarlat, nsets);
//
// delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
// if (cos_subradarlat != 0.0)
// dopscale_factor = cos_subradarlat_save/cos_subradarlat;
// if (rad_xsec != 0.0)
// radalb_factor = rad_xsec_save/rad_xsec;
// if (opt_brightness != 0.0)
// optalb_factor = opt_brightness_save/opt_brightness;
// }
// if ((newsize && vary_alb_size) || ((newshape || newspin) &&
// vary_alb_shapespin)) {
// realize_photo_cuda(dpar, dmod, radalb_factor, optalb_factor, 2); /* reset R, then R_save */
//
// /* Must update opt_brightness_save for Hapke optical scattering
// * law, since single-scattering albedo w isn't just an overall
// * scaling factor */
// if (vary_hapke) {
// if (AF)
// vary_params_af(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
// &opt_brightness_save,&dummyval4, nsets);
// if (STREAMS)
// vary_params_cuda_streams2(dpar,dmod,ddat,12, &dummyval2,
// &dummyval3,&opt_brightness,&dummyval4,nsets);
// else if (STREAMS2)
// vary_params_cuda_streams3(dpar,dmod,ddat,12,&dummyval2,
// &dummyval3,&opt_brightness,&dummyval4,
// nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
// bf_stream);
// else // used to be MPI_SETPAR_HAPKE
// vary_params_cuda(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
// &opt_brightness_save, &dummyval4, nsets);
// }
// } else if (newphoto) {
// rad_xsec_save = rad_xsec;
// opt_brightness_save = opt_brightness;
// realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
// }
// if ((newsize && vary_delcor0_size) || ((newshape || newspin) &&
// vary_delcor0_shapespin)) {
// deldop_zmax_save = deldop_zmax;
// if (FLOAT)
// realize_delcor_cuda_f(ddat, delta_delcor0, 2, nsets, htype, nframes); /* reset delcor0, then delcor0_save */
// else
// realize_delcor_cuda(ddat, delta_delcor0, 2, nsets); /* reset delcor0, then delcor0_save */
// } else if (newdelcor) {
// if (FLOAT)
// realize_delcor_cuda_f(ddat, 0.0, 0, nsets, htype, nframes); /* set delcor0_save to delcor0 */
// else
// realize_delcor_cuda(ddat, 0.0, 0, nsets); /* set delcor0_save to delcor0 */
// }
// if ((newspin && vary_dopscale_spin) || ((newsize || newshape) &&
// vary_dopscale_sizeshape)) {
// cos_subradarlat_save = cos_subradarlat;
// realize_dopscale_cuda_streams(dpar, ddat, dopscale_factor, 2, nsets, dtype); /* reset dopscale, then dopscale_save */
// } else if (newdopscale) {
// realize_dopscale_cuda_streams(dpar, ddat, 1.0, 0, nsets, dtype); /* set dopscale_save to dopscale */
// }
// if (newxyoff)
// realize_xyoff_cuda_streams(ddat, nsets, dtype);
//
// /* If the model extended beyond POS frame (sky rendering) for any
// * trial parameter value(s), if it extended beyond any plane-of-
// * sky fit frames, or if it was too wide in delay-Doppler space,
// * evaluate model for best-fit parameter value to check if these
// * problems persist - that is, to update "posbnd" "badposet" and
// * "badradar" parameters for updated model.
// * (This needn't be done for "baddiam" "badphoto" flags: if we've
// * just finished adjusting an ellipsoid dimension or photometric
// * parameter, realize_mod or realize_photo was called in code block
// * above in order to realize the changed portion of model, and that
// * call updated corresponding flag. Also we needn't worry about the
// * "baddopscale" flag, since realize_dopscale was called above if
// * Doppler scaling factors were changed.) The call to objective
// * (*hotparam) first sets *hotparam (the parameter that we just
// * adjusted) equal to itself (i.e., no change) and then calls
// * calc_fits to evaluate the model for all datasets. */
// if (check_posbnd || check_badposet || check_badradar) {
// if (STREAMS2)
// objective_cuda_streams(hotparamval, verts, htype, dtype,
// nframes, nviews, lc_n, nsets, nf, bf_stream);
// else
// objective_cuda(hotparamval);//(*hotparam);
// }
//
// /* Launch single-thread kernel to retrieve flags in dev_par */
// hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
// checkErrorAfterKernelLaunch("bf_get_flags_krnl");
// gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
// hipMemcpyDeviceToHost));
// /* Display the objective function after each parameter adjustment. */
// printf("%4d %8.6f %d", p, enderr, iround(par->fpartype[p]));
// if (hflags[0]) printf(" (BAD DIAMS)");
// if (hflags[1]) printf(" (BAD PHOTO)");
// if (hflags[2]) printf(" (BAD POS)");
// if (hflags[3]) printf(" (BAD POSET)");
// if (hflags[4]) printf(" (BAD RADAR)");
// if (hflags[5]) printf(" (BAD DOPSCALE)");
// printf("\n");
// fflush(stdout);
//
// /* Display reduced chi-square and individual penalty values after
// * every 20th parameter adjustment. Setting showvals to 1 here
// * means that these things will be displayed next time objective(x)
// * is evaluated - at start of NEXT parameter adjustment. Specifi-
// * cally, they will be displayed when routine mnbrak evaluates
// * objective(x) for *unadjusted* parameter value ax (see comment
// * above).
// * Also rewrite model and obs files after every 20th parameter
// * adjustment. Most of obs file doesn't change, but some floating
// * parameters (i.e. delay correction polynomial coefficients) do. */
// if (++cntr >= npar_update) {
// cntr = 0;
// showvals = 1;
// if (AF) {
// calc_fits_cuda_af(dpar, dmod, ddat);
// chi2_cuda_af(dpar, ddat, 0, nsets);
// }
// else if (STREAMS) {
// calc_fits_cuda_streams(dpar, dmod, ddat);
// chi2_cuda_streams(dpar, ddat, 0, nsets);
// }
// else if (STREAMS2) {
// calc_fits_cuda_streams2(dpar, dmod, ddat, verts, nviews,
// nframes, lc_n, htype, nsets, nf, bf_stream);
// chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 0, nsets, bf_stream);
// }
// else {
// calc_fits_cuda(dpar, dmod, ddat);
// chi2_cuda(dpar, ddat, 0);
// }
// //write_mod( par, mod);
// //write_dat( par, dat);
// }
// }
//
// /* End of this iteration: Write model and data to disk, and display the
// * region within each delay-Doppler or Doppler frame for which model
// * power is nonzero. */
// if (cntr != 0) {
// if (AF){
// calc_fits_cuda_af(dpar, dmod, ddat);
// chi2_cuda_af(dpar, ddat, 0, nsets);
// }
// else if (STREAMS) {
// calc_fits_cuda_streams(dpar, dmod, ddat);
// chi2_cuda_streams(dpar, ddat, 0, nsets);
// }
// else if (STREAMS2) {
// calc_fits_cuda_streams2(dpar, dmod, ddat, verts, nviews,
// nframes, lc_n, htype, nsets, nf, bf_stream);
// chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 0, nsets, bf_stream);
// }
// else {
// calc_fits_cuda(dpar, dmod, ddat);
// chi2_cuda(dpar, ddat, 0);
// }
// //write_mod( par, mod);
// //write_dat( par, dat);
// }
// show_deldoplim_cuda_streams(ddat, htype, nsets, nframes, max_frames);
//
// /* Check if we should start a new iteration */
// if (iter == term_maxiter) {
// /* Just completed last iteration permitted by "term_maxiter" para-
// * meter, so stop iterating; note that since iter is 1-based, this
// * test is always false if "term_maxiter" = 0 (its default value) */
// keep_iterating = 0;
//
// } else if (first_fitpar > 0) {
// /* Just completed partial iteration (possible for iteration 1): if
// * "objfunc_start" parameter was given, check if fractional decrea-
// * se in objective function *relative to objfunc_start* during the
// * just-completed iteration was larger than term_prec, thus
// * justifying a new iteration; if it wasn't specified, definitely
// * proceed to a new iteration. */
// if (objfunc_start > 0.0)
// keep_iterating = ((objfunc_start - enderr)/enderr >= term_prec);
// else
// keep_iterating = 1;
// first_fitpar = 0; /* for all iterations after the first iteration */
//
// } else if (term_badmodel && (hflags[0] || hflags[1] || hflags[2] ||
// hflags[3] || hflags[4] || hflags[5]) ) {
//
// /* Just completed a full iteration, stop iterating because "term_
// * badmodel" parameter is turned on and model has a fatal flaw: it
// * extends beyond POS frame OR it one or more illegal photometric
// * parameters OR it has one or more tiny or negative ellipsoid dia-
// * meters OR it has plane-of-sky fit frames too small to "contain"
// * model OR it is too wide in delay-Doppler space for (delay-)
// * Doppler fit frames to be correctly constructed OR it has out-of-
// * range values for one or more Doppler scaling factors */
// keep_iterating = 0;
//
// } else {
// /* Just completed a full iteration and the model has no fatal flaws
// * (or else the "term_badmodel" parameter is turned off): keep
// * iterating if fractional decrease objective function during the
// * just-completed iteration was greater than term_prec */
// keep_iterating = ((beginerr - enderr)/enderr >= term_prec);
// }
//
// } while (keep_iterating);
//
// /* Show final values of reduced chi-square, individual penalty functions,
// * and the objective function */
// if (AF)
// final_chi2 = chi2_cuda_af(dpar, ddat, 1, nsets);
// else if (STREAMS)
// final_chi2 = chi2_cuda_streams(dpar, ddat, 1, nsets);
// else if (STREAMS2)
// final_chi2 = chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 1, nsets, bf_stream);
// else
// final_chi2 = chi2_cuda(dpar, ddat, 1);
// final_redchi2 = final_chi2/dat->dof;
// printf("# search completed\n");
//
// /* Launch single-thread kernel to get these final flags from dev->par:
// * pen.n, baddiam, badphoto, posbnd, badposet, badradar, baddopscale */
// /* Launch single-thread kernel to retrieve flags in dev_par */
// hipLaunchKernelGGL(( bf_get_flags_krnl), dim3(1),dim3(1), 0, 0, dpar, flags);
// checkErrorAfterKernelLaunch("bf_get_flags_krnl");
// gpuErrchk(hipMemcpy(hflags, flags, sizeof(unsigned char)*7,
// hipMemcpyDeviceToHost));
//
// if (par->pen.n > 0 || hflags[0] || hflags[1] || hflags[2] || hflags[3] ||
// hflags[4] || hflags[5]) {
// printf("#\n");
// printf("# %15s %e\n", "reduced chi2", final_redchi2);
// if (par->pen.n > 0) {
// par->showstate = 1;
// penalties_cuda(dpar, dmod, ddat);
// par->showstate = 0;
// }
// if (hflags[0])
// printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
// baddiam_factor);
// if (hflags[1])
// printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
// badphoto_factor);
// if (hflags[2])
// printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
// posbnd_factor);
// if (hflags[3])
// printf("# objective func multiplied by %.1f: "
// "model extends beyond plane-of-sky fit image\n",
// badposet_factor);
// if (hflags[4])
// printf("# objective func multiplied by %.1f: "
// "model is too wide in delay-Doppler space to construct fit image\n",
// badradar_factor);
// if (hflags[5])
// printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
// baddopscale_factor);
// printf("# ----------------------------\n");
// printf("# %15s %e\n", "objective func", enderr);
// printf("#\n");
// }
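/* NOTE: with the chi-square block above commented out, final_chi2 and
 * final_redchi2 are never assigned before the summary printf below. */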
intifpossible( dofstring, MAXLEN, dat->dof, SMALLVAL, "%f");
printf("# final chi2 = %e for %s dof (reduced chi2 = %f)\n",
final_chi2, dofstring, final_redchi2);
printf("#\n");
fflush(stdout);
/* Destroy the streams */
for (int f=0; f<max_frames; f++)
hipStreamDestroy(bf_stream[f]);
//free(hflags);
free(htype);
free(nframes);
free(lc_n);
free(nviews);
//free(hfparstep);
// free(hfpartol);
// free(hfparabstol);
// free(fpartype);
hipFree(sdev_par);
hipFree(sdev_mod);
hipFree(sdev_dat);
hipFree(fparstep);
hipFree(fpartol);
hipFree(fparabstol);
hipFree(fpartype);
hipFree(fpntr);
hipFree(flags);
hipFree(dtype);
hipFree(verts);
hipDeviceReset();
return enderr;
}
/* objective(x) is the objective function, with x the value of the one
model parameter that is being adjusted at the moment by bestfit.
Other parameters on which objective depends must be placed in static
variables at the top of this file, for compatibility with Numerical
Recipes routines mnbrak and brent (which search for minima of a
function of *one* variable).
objective(x) also displays reduced chi-square and the individual
penalty values if bestfit has set showvals = 1. It then resets
showvals to 0 after displaying these quantities. */
__host__ double objective_cuda( double x)
{
double err, pens, delta_delcor0, dopscale_factor, radalb_factor,
optalb_factor;
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* Assign new trial value to the model parameter being adjusted */
hipLaunchKernelGGL(( bf_set_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, x); //(*hotparam) = x;
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl (in objective_cuda)");
/* Realize whichever part(s) of the model have changed, then calculate the
* model's contribution to chi-square.
* The code here is somewhat opaque because more than one part of the model
* may have changed - if the "vary_delcor0" "vary_dopscale" "vary_radalb" and
* /or "vary_optalb" parameter is being used to permit joint parameter ad-
* justments. Before calling the vary_params routine, the size/shape and spin
* states must be realized (realize_mod and realize_spin); if albedos are
* being varied jointly with other parameters, the photometric state must
* also be realized (realize_photo); and in either case the 0th-order delay
* correction polynomial coefficients and the Doppler scaling factors must be
* reset to their saved values via the appropriate calls to realize_delcor
* and realize_dopscale, respectively.*/
if (newsize || newshape)
realize_mod_cuda(sdev_par, sdev_mod, type);
if (newspin) {
if (AF)
realize_spin_cuda_af(sdev_par, sdev_mod, sdev_dat, sdat->nsets);
else if (STREAMS)
realize_spin_cuda_streams(sdev_par, sdev_mod, sdev_dat, sdat->nsets);
else
realize_spin_cuda(sdev_par, sdev_mod, sdev_dat, sdat->nsets); }
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin))
realize_delcor_cuda(sdev_dat, 0.0, 1, sdat->nsets); /* set delcor0 to delcor0_save */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda(sdev_par, sdev_dat, 1.0, 1); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the trial adjustments to 0th-order delay correc-
* tion polynomial coefficients, to Doppler scaling factors, and to radar
* and optical albedos; these adjustments are applied below */
if (AF)
vary_params_af(sdev_par,sdev_mod,sdev_dat,spar->action,
&deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
sdat->nsets);
else if (STREAMS)
// vary_params_cuda_streams(sdev_par, sdev_mod, sdev_dat, spar->action,
// &deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
// sdat->nsets);
vary_params_cuda_streams2(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
sdat->nsets);
else
vary_params_cuda(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness,
&cos_subradarlat, sdat->nsets);
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, radalb_factor, optalb_factor, 1); /* adjust R */
else if (newphoto)
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 0); /* set R_save to R */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin))
realize_delcor_cuda(sdev_dat, delta_delcor0, 1, sdat->nsets); /* adjust delcor0 */
else if (newdelcor)
realize_delcor_cuda(sdev_dat, 0.0, 0, sdat->nsets); /* set delcor0_save to delcor0 */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda(sdev_par, sdev_dat, dopscale_factor, 1); /* adjust dopscale */
else if (newdopscale)
realize_dopscale_cuda(sdev_par, sdev_dat, 1.0, 0); /* set dopscale_save to dopscale */
if (newxyoff)
realize_xyoff_cuda(sdev_dat);
if (AF) {
calc_fits_cuda_af(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_af(sdev_par, sdev_dat, 0, sdat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda(sdev_par, sdev_dat, 0);
}
/* Divide chi-square by DOF to get reduced chi-square. */
err /= sdat->dof;
/* If bestfit has set showvals = 1, display reduced chi-square. Then set
* spar->showstate = 1, so that when function penalties is called later,
* it "knows" that it should display the individual penalty values.
* Reset showstate to 0 if showvals = 0. */
if (showvals) {
printf("# %15s %e\n", "reduced chi2", err);
spar->showstate = 1;
}
else
spar->showstate = 0;
/* Compute penalties and add to reduced chi-square. Individual penalty values
* will be displayed if we set spar->showstate = 1 a few lines back. */
pens = penalties_cuda(sdev_par, sdev_mod, sdev_dat);
err += pens;
/* Double the objective function if there's an ellipsoid component with tiny
* or negative diameter, if any optical photometric parameters have invalid
* values, if any portion of the model lies outside specified POS window or
* outside any plane-of-sky fit image, or if model is too wide in delay-Dopp-
* ler space for any (delay-)Doppler fit image to be correctly constructed.
* This effectively rules out any models with any of these flaws. */
/* NOTE: TO-DO: baddiam may need to come from somewhere other than spar.
 * However, bestfit gets called only once and spar/smod/sdat get copied
 * only once. */
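/* E.g., with bad_objfactor = 2.0 (its historical default) and a
 * baddiam_logfactor of 0.5, the multiplier below is 2.0*exp(0.5) ~ 3.3,
 * so the penalty grows smoothly with the severity of the violation. */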
if (spar->baddiam) {
baddiam_factor = spar->bad_objfactor * exp(spar->baddiam_logfactor);
err *= baddiam_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
}
if (spar->badphoto) {
badphoto_factor = spar->bad_objfactor * exp(spar->badphoto_logfactor);
err *= badphoto_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
}
if (spar->posbnd) {
check_posbnd = 1; /* tells bestfit about this problem */
posbnd_factor = spar->bad_objfactor * exp(spar->posbnd_logfactor);
err *= posbnd_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
}
if (spar->badposet) {
check_badposet = 1; /* tells bestfit about this problem */
badposet_factor = spar->bad_objfactor * exp(spar->badposet_logfactor);
err *= badposet_factor;
if (showvals)
printf("# objective func multiplied by %.1f: plane-of-sky fit frame too small\n",
badposet_factor);
}
if (spar->badradar) {
check_badradar = 1; /* tells bestfit about this problem */
badradar_factor = spar->bad_objfactor * exp(spar->badradar_logfactor);
err *= badradar_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model too wide in delay-Doppler space\n",
badradar_factor);
}
if (spar->baddopscale) {
baddopscale_factor = spar->bad_objfactor * exp(spar->baddopscale_logfactor);
err *= baddopscale_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
}
/* Reset showvals to 0 if it had been 1 (i.e., turn off display of reduced
* chi-square and the individual penalty values). */
if (showvals)
fflush( stdout);
showvals = 0;
return err;
}
/* objective_cuda_streams is a version of objective_cuda that takes an extra
 * argument - the cudaStreams created in bestfit_cuda2. The goal is to keep
 * the overhead of stream creation/destruction to a minimum by creating a
 * single fixed set of streams per program run and reusing it throughout. */
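/* A minimal usage sketch (illustrative only; names follow bestfit_cuda2,
 * where max_frames is the largest per-set frame count and the streams are
 * created once and reused for every objective evaluation during the run):
 *
 *     hipStream_t bf_stream[max_frames];
 *     for (int f = 0; f < max_frames; f++)
 *         hipStreamCreate(&bf_stream[f]);
 *     // ...many calls to objective_cuda_streams(..., bf_stream)...
 *     for (int f = 0; f < max_frames; f++)
 *         hipStreamDestroy(bf_stream[f]);
 */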
__host__ double objective_cuda_streams(
double x,
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
hipStream_t *bf_stream)
{
double err, pens, delta_delcor0, dopscale_factor, radalb_factor,
optalb_factor, *dlogfactors, *hlogfactors;
unsigned char *dflags, *hflags;
gpuErrchk(hipMalloc((void**)&dflags, sizeof(unsigned char)*7));
gpuErrchk(hipMalloc((void**)&dlogfactors, sizeof(double)*7));
hflags = (unsigned char *) malloc(7*sizeof(unsigned char));
hlogfactors = (double *) malloc(7*sizeof(double));
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* Assign new trial value to the model parameter being adjusted */
hipLaunchKernelGGL(( bf_set_hotparam_val_krnl), dim3(1),dim3(1), 0, 0, x); //(*hotparam) = x;
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl (in objective_cuda)");
/* Realize whichever part(s) of the model have changed, then calculate root's
* contribution to chi-square.
* The code here is somewhat opaque because more than one part of the model
* may have changed - if the "vary_delcor0" "vary_dopscale" "vary_radalb" and
* /or "vary_optalb" parameter is being used to permit joint parameter ad-
* justments. Before calling the vary_params routine, the size/shape and spin
* states must be realized (realize_mod and realize_spin); if albedos are
* being varied jointly with other parameters, the photometric state must
* also be realized (realize_photo); and in either case the 0th-order delay
* correction polynomial coefficients and the Doppler scaling factors must be
* reset to their saved values via the appropriate calls to realize_delcor
* and realize_dopscale, respectively.*/
if (newsize || newshape)
realize_mod_cuda(sdev_par, sdev_mod, type);
if (newspin) {
if (AF)
realize_spin_cuda_af(sdev_par, sdev_mod, sdev_dat, nsets);
else if (STREAMS)
realize_spin_cuda_streams(sdev_par, sdev_mod, sdev_dat, nsets);
else if (STREAMS2) {
if (FLOAT)
realize_spin_cuda_streams2f(sdev_par, sdev_mod, sdev_dat, htype, nframes,
nviews, nsets, bf_stream);
else
realize_spin_cuda_streams2(sdev_par, sdev_mod, sdev_dat, htype, nframes,
nviews, nsets, bf_stream);
}
else
realize_spin_cuda(sdev_par, sdev_mod, sdev_dat, nsets);
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin)) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, 0.0, 1, nsets, htype, nframes); /* set delcor0 to delcor0_save */
else
realize_delcor_cuda(sdev_dat, 0.0, 1, nsets); /* set delcor0 to delcor0_save */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda_streams(sdev_par, sdev_dat, 1.0, 1, nsets, dtype); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the trial adjustments to 0th-order delay correc-
* tion polynomial coefficients, to Doppler scaling factors, and to radar
* and optical albedos, then send them to the branch nodes */
if (AF)
vary_params_af(sdev_par,sdev_mod,sdev_dat,spar->action,
&deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
nsets);
else if (STREAMS)
vary_params_cuda_streams2(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nsets);
else if (STREAMS2) {
if (FLOAT)
vary_params_cuda_streams3f(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
else
vary_params_cuda_streams3(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream); }
else
vary_params_cuda(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness,
&cos_subradarlat, nsets);
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, radalb_factor, optalb_factor, 1); /* adjust R */
else if (newphoto)
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 0); /* set R_save to R */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin)) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, delta_delcor0, 1, nsets, htype, nframes); /* adjust delcor0 */
else
realize_delcor_cuda(sdev_dat, delta_delcor0, 1, nsets); /* adjust delcor0 */
}
else if (newdelcor) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, 0.0, 0, nsets, htype, nframes); /* set delcor0_save to delcor0 */
else
realize_delcor_cuda(sdev_dat, 0.0, 0, nsets); /* set delcor0_save to delcor0 */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda_streams(sdev_par, sdev_dat, dopscale_factor, 1, nsets, dtype); /* adjust dopscale */
else if (newdopscale)
realize_dopscale_cuda_streams(sdev_par, sdev_dat, 1.0, 0, nsets, dtype); /* set dopscale_save to dopscale */
if (newxyoff)
realize_xyoff_cuda_streams(sdev_dat, nsets, dtype);
if (AF) {
calc_fits_cuda_af(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_af(sdev_par, sdev_dat, 0, nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_streams(sdev_par, sdev_dat, 0, nsets);
}
else if (STREAMS2) {
calc_fits_cuda_streams2(sdev_par, sdev_mod, sdev_dat, verts, nviews,
nframes, lc_n, htype, nsets, nf, bf_stream);
err = chi2_cuda_streams2(sdev_par, sdev_dat, htype, dtype, nframes,
lc_n, 0, nsets, bf_stream);
}
else {
calc_fits_cuda(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda(sdev_par, sdev_dat, 0);
}
/* Divide chi-square by DOF to get reduced chi-square. */
err /= sdat->dof;
/* If bestfit has set showvals = 1, display reduced chi-square. Then set
* spar->showstate = 1, so that when function penalties is called later,
* it "knows" that it should display the individual penalty values.
* Reset showstate to 0 if showvals = 0. */
if (showvals) {
printf("# %15s %e\n", "reduced chi2", err);
spar->showstate = 1;
}
else
spar->showstate = 0;
/* Compute penalties and add to reduced chi-square. Individual penalty values
* will be displayed if we set spar->showstate = 1 a few lines back. */
pens = penalties_cuda(sdev_par, sdev_mod, sdev_dat);
err += pens;
/* Double the objective function if there's an ellipsoid component with tiny
* or negative diameter, if any optical photometric parameters have invalid
* values, if any portion of the model lies outside specified POS window or
* outside any plane-of-sky fit image, or if model is too wide in delay-Dopp-
* ler space for any (delay-)Doppler fit image to be correctly constructed.
* This effectively rules out any models with any of these flaws. */
/* NOTE: TO-DO: baddiam may need to come from somewhere other than spar.
 * However, bestfit gets called only once and spar/smod/sdat get copied
* only once.
* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
dlogfactors[0] = dpar->bad_objfactor;
dlogfactors[1] = dpar->baddiam_logfactor;
dlogfactors[2] = dpar->badphoto_logfactor;
dlogfactors[3] = dpar->posbnd_logfactor;
dlogfactors[4] = dpar->badposet_logfactor;
dlogfactors[5] = dpar->badradar_logfactor;
dlogfactors[6] = dpar->baddopscale_logfactor;
*/
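/* Fetch the six problem flags plus bad_objfactor and the six penalty
 * log-factors from the device copy of the parameter structure in a single
 * kernel launch, then copy them back to the host for the objective-function
 * adjustments below. */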
hipLaunchKernelGGL(( ocs_get_flags_krnl), dim3(1),dim3(1), 0, 0, sdev_par, dflags, dlogfactors);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(hipMemcpy(hflags, dflags, sizeof(unsigned char)*7,
hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(hlogfactors, dlogfactors, sizeof(double)*7,
hipMemcpyDeviceToHost));
if (hflags[0]) {
baddiam_factor = hlogfactors[0] * exp(hlogfactors[1]);
err *= baddiam_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
}
if (hflags[1]) {
badphoto_factor = hlogfactors[0] * exp(hlogfactors[2]);
err *= badphoto_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
}
if (hflags[2]) {
check_posbnd = 1; /* tells bestfit about this problem */
posbnd_factor = hlogfactors[0] * exp(hlogfactors[3]);
err *= posbnd_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
}
if (hflags[3]) {
check_badposet = 1; /* tells bestfit about this problem */
badposet_factor = hlogfactors[0] * exp(hlogfactors[4]);
err *= badposet_factor;
if (showvals)
printf("# objective func multiplied by %.1f: plane-of-sky fit frame too small\n",
badposet_factor);
}
if (hflags[4]) {
check_badradar = 1; /* tells bestfit about this problem */
badradar_factor = hlogfactors[0] * exp(hlogfactors[5]);
err *= badradar_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model too wide in delay-Doppler space\n",
badradar_factor);
}
if (hflags[5]) {
baddopscale_factor = hlogfactors[0] * exp(hlogfactors[6]);
err *= baddopscale_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
}
/* Reset showvals to 0 if it had been 1 (i.e., turn off display of reduced
* chi-square and the individual penalty values). */
if (showvals)
fflush( stdout);
showvals = 0;
free(hflags);
free(hlogfactors);
hipFree(dflags);
hipFree(dlogfactors);
return err;
}
| f60bcb525dfadf2dd5260145cca568d39fc017ad.cu | /*****************************************************************************************
bestfit.c
Iterate over all floating parameters, at each step adjusting just one parameter x in order
to minimize objective(x), the objective function (reduced chi-square plus penalties).
Continue until the fractional reduction in objective(x) due to a full pass through the
parameter list is less than term_prec. Return the final value of the objective function.
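   Concretely, the fit is declared converged when (beginerr - enderr)/enderr
   falls below term_prec at the end of a full iteration, where beginerr and
   enderr are the objective values before and after that iteration; the
   "term_maxiter" parameter caps the number of iterations.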
__________________________________________________________________________________________
Modified 2016 July 7 by Matt Engels:
Adapted for use in shape-cuda.
------------------------------------------------------------------------------------------
Modified 2014 February 19 by CM:
Allow for multiple optical scattering laws when setting the "vary_hapke" flag
Modified 2013 July 14 by CM:
Implement the "term_maxiter" parameter
Modified 2012 July 5 by MCN and CM:
Use the gethostname function rather than the HOST environment variable to get root's
hostname
List root's PID in addition to the hostname
List the PID for each branch node, not just the hostname
Modified 2012 June 13 by CM:
Implement "objfunc_start" parameter
Modified 2012 March 23 by CM:
Implement Doppler scaling -- more particularly, simultaneous adjustment of shape/spin
parameters and Doppler scale factors via the "vary_dopscale" parameter
Modified 2010 April 12 by CM:
Bug fix: When fitting a size, shape, or spin parameter with the
"vary_delcor0" parameter being used, call realize_delcor to reset
the 0th-order delay correction polynomial coefficients to their
saved values before calling vary_params. (For infinitely fine
model resolution and delay-Doppler resolution this wouldn't
matter but in practice it does.)
Modified 2009 November 15 by CM:
Fix printf statement with too many arguments
Modified 2009 July 5 by CM:
Add "npar_update" parameter rather than hard-wiring an update (rewrite
mod and obs files and display reduced chi2 and penalty functions)
every 20th parameter adjustment
Modified 2009 April 3 by CM:
If the model has illegal properties (e.g., negative ellipsoid diameters)
then, for each type of problem, multiply the objective function not
only by the "bad_objfactor" parameter but also by an additional
factor that increases as the problem gets worse. The
"baddiam_logfactor" "badphoto_logfactor" "posbnd_logfactor"
"badposet_logfactor" and "badradar_logfactor" parameters are the
logarithms of the additional factors for the five possible problem
types; the calc_fits routine computes the logarithms rather than the
factors themselves so as to avoid floating-point overflow.
Revise MPI_CALC so that root receives the "posbnd_logfactor" parameter
from each branch node rather than the "posbnd" parameter:
posbnd_logfactor > 0.0 if the model extends beyond the POS frame
for any of the branch node's datasets. If root sees that this
value is > 0.0, it will set its "posbnd" flag and will increase the
objective function accordingly.
Revise MPI_CALC so that root receives the "badposet_logfactor"
parameter from each branch node: badposet_logfactor > 0.0 if the
model extends beyond the fit frame for any of the branch node's
plane-of-sky datasets. If root sees that this value is > 0.0, it
will set its "badposet" flag and will increase the objective
function accordingly.
Revise MPI_CALC so that root receives the "badradar_logfactor"
parameter from each branch node: badradar_logfactor > 0.0 if the
model is too wide in delay-Doppler space for the program to
construct some or all (delay-)Doppler fit frames. If root sees
that this value is > 0.0, it will set its "badradar" flag and will
increase the objective function accordingly.
For MPI_Recv calls, mpi_par[0] is no longer equal to the MPI action,
since the message tag argument already serves that purpose (as of
2008 April 10) -- so the other mpi_par elements are renumbered
Modified 2008 August 10 by CM:
Never terminate the fit at the end of a partial iteration -- that is,
after the first iteration of a fit where first_fitpar > 0
Modified 2008 July 11 by CM:
Display the hostname even for single-processor fits
Modified 2008 April 10 by CM:
For parallel-processing fits, display the hostname for each node
Use message tag argument to MPI_Recv to identify the MPI action
Modified 2007 August 29 by CM:
Implement the "avoid_badpos" parameter: if this parameter is turned on
and the model extends beyond the POS frame and it is time to fit a
size parameter, start by shrinking that size parameter until the
model fits within the POS frame
Implement the "bad_objfactor" parameter in routine objective: multiply
the objective function by this factor for illegal photometric
parameters, for tiny or negative ellipsoid diameters, and for
models that extend beyond the plane-of-sky frame. (Previously
this factor was fixed at 2.0.)
Rename MPI_TAG to MPI_TAG_1 to avoid name conflict with mpich headers
Modified 2007 August 16 by CM:
Implement the "term_badmodel" parameter: If this parameter is turned on
and, at the end of any fit iteration, the model ever extends beyond
the POS frame OR has any illegal photometric parameters OR has any
tiny or negative ellipsoid diameters, the fit is terminated.
Modified 2007 August 10 by CM:
Eliminate unused variables
Modified 2006 December 20 by CM:
Revise MPI_CALC so that root receives the "posbnd" parameter from each
branch node, so that the objective function can be doubled if the
model extends beyond the plane-of-sky frame for any datasets
If the model extends beyond the plane-of-sky frame for any trial value
of a parameter, evaluate the model for the best-fit parameter value
to check whether or not it extends beyond the POS frame
Modified 2006 October 1 by CM:
Add two new arguments to realize_delcor
Add three new arguments to realize_photo
Implement "vary_delcor0" "vary_radalb" and "vary_optalb" parameters
Implement SIZEPAR parameters via the "newsize" variable
Modified 2005 June 27 by CM:
Renamed "round" function to "iround" to avoid conflict
Modified 2005 March 17 by CM:
For parallel processing, check that root is receiving the responses
to the correct broadcast
Root no longer needs to compute degrees of freedom or to receive
dof values from branch nodes: Now they are computed in read_dat
Degrees of freedom can now be floating-point rather than integer
Modified 2005 February 28 by CM:
Add screen warnings if objective function has been doubled due to
(a) tiny or negative ellipsoid diameters
(b) illegal photometric parameters
(c) model extending beyond the model POS frame
Initialize the three parameters (baddiam, badphoto, posbnd) that
flag these three problems in other routines (realize_mod,
realize_photo, calc_fits) rather than in objective(x), so that
these three parameters can be used for actions other than "fit"
Rename DATAPAR to be DELCORPAR
Add XYOFFPAR and implement the new realize_xyoff routine
Modified 2005 February 22 by CM:
Move branch nodes' signoff statements from shape.c to here, so that
they can appear in order
Modified 2005 February 13 by CM:
Rename objective function "f(x)" to be "objective(x)"
Only broadcast to branch nodes if there are any branch nodes
(i.e., if mpi_nproc > 1)
Broadcast the new MPI_DUMMYPAR signal to branch nodes before evaluating
objective(0.0), the objective function for the existing model;
this tells each branch node to point hotparam to a dummy variable
rather than to a model parameter, so that the dummy variable will
be set to 0.0 and the model will be unchanged.
Broadcast the new MPI_CALFACT signal to branch nodes to get updated
calibration factors before rewriting the obs file
Root now realizes the model after setting a parameter to its best value
Make sure that root and branch nodes update the model (i.e., that they
call the calc_fits and chi2 routines) before rewriting the mod and
obs files and before calling routine show_deldoplim
Avoid unnecessary model realizations for root by allowing newshape,
newspin, newphoto, and newdelcor to be 0, not always 1 as before
Move MPI_DONE broadcast to here from shape.c
Modified 2005 January 25 by CM:
Eliminated unused variable
Modified 2005 January 10 by CM:
When fitting using parallel processing, ping all of the branch nodes
and inform the user that they're active
Modified 2004 October 29 by CM:
Add "first_fitpar" parameter so that a fit can be started (or resumed)
at some parameter (counting from 0) other than the first parameter
Modified 2004 October 10 by CM:
Fix chi-square display at start of each iteration and at the
end of the fit by calling realize_mod, realize_spin, realize_photo,
realize_delcor, and calc_fits before calling chi2
Modified 2004 August 13 by CM:
Call modified minimum search routine brent_abs rather than brent
so that absolute fitting tolerances can be specified
Modified 2004 May 21 by CM:
Display the final values of the individual penalty functions
Modified 2004 April 3 by CM:
Add the "list_breakdown" argument to routine chi2 so that we can
display the chi2 breakdown by data type (Doppler, delay-Doppler,
POS, lightcurves) at the start of each fit iteration and at
the end of the fit
Modified 2004 February 26 by CM:
realize_photo now takes two arguments rather than one
Modified 2003 April 26 by CM:
Added "show_deldoplim" call at the end of each fit iteration,
to check for overly tight data vignetting
Modified 2003 April 23 by CM:
Implemented '=' state for delay correction polynomial coefficients
via the "realize_delcor" routine
Modified 2003 April 17 by CM:
Added "baddiam" parameter to function f so that the objective
function is doubled if an ellipsoid component has a tiny or
negative diameter
Modified 2003 April 2 by CM:
In function f (which computes reduced-chi-squared-plus-penalties),
moved call to "penalties" from before spar->showstate is set
to after.
Values of reduced chi-squared and of the various penalties are
printed to the screen after every 20th parameter adjustment.
To be precise, they're printed at the very first call to f when
adjusting parameter 21, 41, 61, etc. This call is made within
function bestfit by minimum-bracketing function mnbrak;
it corresponds to the *unadjusted* value of parameter 21 (or 41
or ...), which is what we want.
Until now, the individual penalty values were being printed on the
*second* call to f, also made by mnbrak but with parameter 21
incremented by the relevant initial step size (e.g., length_step).
Hence these printed values were irrelevant and misleadingly large.
Moving the call to "penalties" later in the code fixes the problem.
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
#include "../shape/shape-cuda.h"
}
static __device__ double *hotparam;
static struct par_t *spar, *sdev_par;
static struct mod_t *smod, *sdev_mod;
static struct dat_t *sdat, *sdev_dat;
static int newsize, newshape, newspin, newphoto, newdelcor, newdopscale, newxyoff,
showvals=0, vary_delcor0_size, vary_delcor0_shapespin, vary_dopscale_spin,
vary_dopscale_sizeshape, vary_alb_size, vary_alb_shapespin, vary_hapke,
call_vary_params, check_posbnd, check_badposet, check_badradar;
static double deldop_zmax, deldop_zmax_save, cos_subradarlat, cos_subradarlat_save,
rad_xsec, rad_xsec_save, opt_brightness, opt_brightness_save, baddiam_factor,
badphoto_factor, posbnd_factor, badposet_factor, badradar_factor,
baddopscale_factor;
static unsigned char type;
static double hotparamval;
double objective_cuda(double x);
__host__ double objective_cuda_streams(double x, struct vertices_t **verts,
unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews,
int *lc_n, int nsets, int nf, cudaStream_t *bf_stream);
__device__ double bf_hotparamval, bf_dummyval;
__device__ int bf_partype;
__global__ void bf_get_flags_krnl(struct par_t *dpar, unsigned char *flags) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
}
}
__global__ void ocs_get_flags_krnl(struct par_t *dpar, unsigned char *flags,
double *dlogfactors) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
dlogfactors[0] = dpar->bad_objfactor;
dlogfactors[1] = dpar->baddiam_logfactor;
dlogfactors[2] = dpar->badphoto_logfactor;
dlogfactors[3] = dpar->posbnd_logfactor;
dlogfactors[4] = dpar->badposet_logfactor;
dlogfactors[5] = dpar->badradar_logfactor;
dlogfactors[6] = dpar->baddopscale_logfactor;
}
}
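/* hotparam is a device-side pointer to whichever model parameter is currently
 * being adjusted. The single-thread kernels below let the host point it at a
 * free parameter (or at the dummy variable bf_dummyval), read its value back
 * through bf_hotparamval, scale it in place, or overwrite it with a new trial
 * value. */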
__global__ void bf_set_hotparam_initial_krnl() {
/* Single-threaded kernel */
if (threadIdx.x == 0)
hotparam = &bf_dummyval;
}
__global__ void bf_set_hotparam_pntr_krnl(double **fpntr,
int *fpartype, int p) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
hotparam = fpntr[p]; /* This is pointing at a device variable */
bf_partype = fpartype[p]; /* parameter type */
}
}
__global__ void bf_get_hotparam_val_krnl() {
/* Single threaded kernel */
if (threadIdx.x == 0)
bf_hotparamval = *hotparam;
}
__global__ void bf_mult_hotparam_val_krnl(double factor) {
/* Single-threaded kernel */
if (threadIdx.x == 0)
*hotparam *= factor;
}
__global__ void bf_set_hotparam_val_krnl(double newvalue) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
*hotparam = newvalue;
bf_hotparamval = newvalue;
}
}
__global__ void set_verts_shortcut_krnl(struct mod_t *dmod, struct vertices_t **verts) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
verts[0] = &dmod->shape.comp[0].real;
}
}
__host__ double bestfit_CUDA(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, struct par_t *par, struct mod_t *mod,
struct dat_t *dat)
{
char hostname[MAXLEN], dofstring[MAXLEN];
int i, iter=0, p, cntr, first_fitpar, partype, keep_iterating=1, ilaw;
long pid_long;
pid_t pid;
double beginerr, enderr, ax, bx, cx, obja, objb, objc, xmin,
final_chi2, final_redchi2, dummyval2, dummyval3, dummyval4,
delta_delcor0, dopscale_factor, radalb_factor, optalb_factor;
unsigned char *flags;
dim3 THD, BLK;
/* Get the hostname of host machine and the PID */
(void) gethostname(hostname, MAXLEN-1);
pid = getpid();
pid_long = (long) pid; /* Assumes pid_t fits in a long */
printf("#\n# CUDA fit (pid %ld on %s)\n", pid_long, hostname);
fflush(stdout);
gpuErrchk(cudaMalloc((void**)&sdev_par, sizeof(struct par_t)));
gpuErrchk(cudaMemcpy(sdev_par, par, sizeof(struct par_t), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&sdev_mod, sizeof(struct mod_t)));
gpuErrchk(cudaMemcpy(sdev_mod, mod, sizeof(struct mod_t), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&sdev_dat, sizeof(struct dat_t)));
gpuErrchk(cudaMemcpy(sdev_dat, dat, sizeof(struct dat_t), cudaMemcpyHostToDevice));
cudaCalloc1((void**)&flags, sizeof(unsigned char), 7);
/* Initialize static global pointers used by objective(x) below
to be compatible with "Numerical Recipes in C" routines */
spar = par; smod = mod; sdat = dat;
sdev_par = dpar; sdev_mod = dmod; sdev_dat = ddat;
/* Initialize static global parameters */
newsize = newshape = newspin = newphoto = newdelcor = newdopscale = newxyoff = 1;
deldop_zmax = deldop_zmax_save = 0.0;
cos_subradarlat = cos_subradarlat_save = 0.0;
rad_xsec = rad_xsec_save = 0.0;
opt_brightness = opt_brightness_save = 0.0;
vary_delcor0_size = (par->vary_delcor0 != VARY_NONE);
vary_delcor0_shapespin = (par->vary_delcor0 == VARY_ALL);
vary_dopscale_spin = (par->vary_dopscale != VARY_NONE);
vary_dopscale_sizeshape = (par->vary_dopscale == VARY_ALL);
vary_alb_size = (par->vary_radalb != VARY_NONE || par->vary_optalb != VARY_NONE);
vary_alb_shapespin = (par->vary_radalb == VARY_ALL || par->vary_optalb == VARY_ALL);
vary_hapke = 0;
if (par->vary_optalb != VARY_NONE)
for (ilaw=0; ilaw<mod->photo.noptlaws; ilaw++)
if (mod->photo.opttype[ilaw] == HAPKE || mod->photo.opttype[ilaw] == HARMHAPKE
|| mod->photo.opttype[ilaw] == INHOHAPKE)
vary_hapke = 1;
call_vary_params = (par->vary_delcor0 != VARY_NONE || par->vary_dopscale != VARY_NONE
|| par->vary_radalb != VARY_NONE
|| par->vary_optalb != VARY_NONE);
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
type = mod->shape.comp[0].type;
/* Allocate memory for pointers, steps, and tolerances */
cudaCalloc1((void**)&fparstep, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fpartol, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fparabstol, sizeof(double), par->nfpar);
cudaCalloc1((void**)&fpartype, sizeof(int), par->nfpar);
cudaCalloc1((void**)&fpntr, sizeof(double*), par->nfpar);
for (i=0; i<par->nfpar; i++)
cudaCalloc1((void**)&fpntr[i], sizeof(double), 1);
/* The following call sets up the parameter lists allocated above */
mkparlist_cuda(dpar, dmod, ddat, fparstep, fpartol,
fparabstol, fpartype, fpntr);
/* Compute deldop_zmax_save, cos_subradarlat_save, rad_xsec_save, and
* opt_brightness_save for the initial model */
if (call_vary_params)
{
realize_mod_cuda(dpar, dmod, type);
if (AF) realize_spin_cuda_af(dpar, dmod, ddat, dat->nsets);
else if (STREAMS) realize_spin_cuda_streams(dpar, dmod, ddat, dat->nsets);
else realize_spin_cuda(dpar, dmod, ddat, dat->nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
/* realize_delcor and realize_dopscale were called by read_dat */
if (AF)
vary_params_af(dpar, dmod, ddat, par->action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
else if (STREAMS)
// vary_params_cuda_streams(dpar, dmod, ddat, par->action, &deldop_zmax_save,
// &rad_xsec_save, &opt_brightness_save,
// &cos_subradarlat_save, dat->nsets);
vary_params_cuda_streams2(dpar, dmod, ddat, par->action,
&deldop_zmax_save, &rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
else
vary_params_cuda(dpar, dmod, ddat, par->action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, dat->nsets);
}
printf("rad_xsec: %f\n", rad_xsec_save);
printf("deldop_zmax: %f\n", (float)deldop_zmax_save);
/* Point hotparam to a dummy variable (dummyval) rather than to a model pa-
* rameter; then call objective(0.0) to set dummy variable = 0.0, realize
* the initial model, calculate the fits, return initial model's objective
* function as enderr. */
bf_set_hotparam_initial_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_set_hotparam_initial_krnl");
enderr = objective_cuda(0.0);
printf("#\n# searching for best fit ...\n");
printf("%4d %8.6f to begin", 0, enderr);
/* Launch single-thread kernel to retrieve flags in dev_par */
/* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;*/
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Now act on the flags just retrieved from dev_par */
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Display the region within each delay-Doppler or Doppler frame that, ac-
* cording to initial model, has nonzero power. A warning is displayed if
* any region extends beyond the data limits: the vignetting is too tight,
* or else some model parameter (such as a delay correction polynomial co-
* efficient) is seriously in error. */
show_deldoplim_cuda(dat, ddat);
/* Set the starting fit parameter for the first iteration only */
first_fitpar = par->first_fitpar;
if (first_fitpar < 0 || first_fitpar >= par->nfpar) {
printf("ERROR: need 0 <= first_fitpar < nparams (%d)\n", par->nfpar);
bailout("bestfit.c\n");
}
/* Iteratively adjust model; for each iteration, step through all free pa-
* rameters, adjusting one parameter at a time so as to minimize the objec-
* tive function at each step. Stop when fractional decrease in the objec-
* tive function from one iteration to the next is less than term_prec. */
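/* A sketch of the intended loop structure (the outer do/while is commented
 * out and the parameter sweep is limited to p < 1, apparently for testing,
 * so as written only a single pass over the first free parameter runs):
 *
 *     do {
 *         beginerr = enderr;
 *         for (p = first_fitpar; p < par->nfpar; p++) {
 *             bracket a minimum of objective(x) with mnbrak;
 *             refine it with brent_abs to obtain xmin;
 *             set the parameter to xmin and re-realize the model;
 *         }
 *     } while (keep_iterating);
 */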
// do {
showvals = 1; /* show reduced chi-square and penalties at beginning */
beginerr = enderr;
printf("# iteration %d %f", ++iter, beginerr);
/* Launch single-thread kernel to retrieve flags in dev_par */
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Now act on the flags just retrieved from dev_par */
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Show breakdown of chi-square by data type */
if (AF)
chi2_cuda_af(dpar, ddat, 1, dat->nsets);
else if (STREAMS)
chi2_cuda_streams(dpar, ddat, 1, dat->nsets);
else
chi2_cuda(dpar, ddat, 1);
/* Loop through the free parameters */
cntr = first_fitpar % par->npar_update;
for (p=first_fitpar; p<1/*par->nfpar*/; p++) {
/* Adjust only parameter p on this try */
bf_set_hotparam_pntr_krnl<<<1,1>>>(fpntr, fpartype, p);
checkErrorAfterKernelLaunch("bf_set_hotparam_pntr_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&partype, bf_partype, sizeof(int),
0, cudaMemcpyDeviceToHost));
newsize = newshape = newspin = newphoto = newdelcor = newdopscale
= newxyoff = 0;
if (partype == SIZEPAR) newsize = 1;
else if (partype == SHAPEPAR) newshape = 1;
else if (partype == SPINPAR) newspin = 1;
else if (partype == PHOTOPAR) newphoto = 1;
else if (partype == DELCORPAR) newdelcor = 1;
else if (partype == DOPSCALEPAR) newdopscale = 1;
else if (partype == XYOFFPAR) newxyoff = 1;
/* If this is a size parameter AND model extends beyond POS frame
* AND the "avoid_badpos" parameter is turned on, shrink model by
* 5% at a time until it fits within the POS frame.
* We must start with the redundant model evaluation for the un-
* changed value of the size parameter, in case the first call to
* objective displays reduced chi-square and the penalty functions. */
if (par->avoid_badpos && partype == SIZEPAR) {
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Get value of (*hotparam) */
bf_get_hotparam_val_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, cudaMemcpyDeviceToHost));
while (flags[2]) {
objective_cuda(hotparamval);
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
if (flags[2]) {
/* Set the value pointed to by hotparam to 0.95 of its
* previous value */
bf_mult_hotparam_val_krnl<<<1,1>>>(0.95);
checkErrorAfterKernelLaunch("bf_mult_hotparam_val_krnl");
}
}
}
/* Get value of (*hotparam) so that mnbrak can use it*/
bf_get_hotparam_val_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, cudaMemcpyDeviceToHost));
/* Use Numerical Recipes routine mnbrak to bracket a minimum in the
* objective function (reduced chi-square plus penalties) objec-
* tive(x), where x is the value of parameter p. As initial trial
* parameter values, use ax (unadjusted value) and bx, that value
* incremented by the appropriate step size (length_step,spin_step,
* etc.). mnbrak returns 3 parameter values, with bx between ax
* and cx; note that ax and bx are changed from their input values.
* It also returns the 3 corresponding objective(x) values, where
* objb is less than obja and objc. Hence there is at least one
* local minimum (but not necessarily *any* global minimum)
* somewhere between ax and cx. */
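/* Purely illustrative example: if the current parameter value is 2.0 and its
 * step size is 0.05, mnbrak starts from ax = 2.0, bx = 2.05 and walks
 * downhill until it returns ax, bx, cx with objb < obja and objb < objc,
 * i.e. a guaranteed bracket around at least one local minimum. */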
ax = hotparamval;
bx = ax + par->fparstep[p]; /* par usage is fine here */
mnbrak( &ax, &bx, &cx, &obja, &objb, &objc, objective_cuda);
/* Before homing in on local minimum, initialize flags that will
* tell us if model extended beyond POS frame (sky rendering) for
* any trial parameter value(s), if it extended beyond any POS ima-
* ges, and if it was too wide in delay-Doppler space */
check_posbnd = 0;
check_badposet = 0;
check_badradar = 0;
/* Now use Numerical Recipes function brent to find local minimum -
* that is, to find xmin, the best value of x, to within the
* *fractional* tolerance specified for parameter p (length_tol,
* spin_tol, etc.). brent's return value is the minimized objective
* function, objective(xmin). If more than one local minimum bet-
* ween ax and cx, brent might not find the best one. brent_abs is
* a modified version of brent that has an absolute fitting tole-
* rance as one of its arguments, in addition to the existing
* fractional tolerance. */
enderr = brent_abs( ax, bx, cx, objective_cuda,
par->fpartol[p], par->fparabstol[p], &xmin);
/* Realize whichever part(s) of the model has changed.
*
* The code here is somewhat opaque because more than one part of
* the model may have changed - if the "vary_delcor0" "vary_radalb"
* and/or "vary_optalb" parameter is being used to permit joint pa-
* rameter adjustments. Before calling the vary_params routine, the
* size/shape and spin states must be realized (realize_mod and
* realize_spin); if albedos are being varied jointly with other
* parameters, the photometric state must also be realized
* (realize_photo); and in either case the 0th-order delay correc-
* tion polynomial coefficients must be reset to their saved
* values via the appropriate call to realize_delcor. */
/* Set the value pointed to by hotparam to xmin, the best value
 * just found:  (*hotparam) = xmin; */
bf_set_hotparam_val_krnl<<<1,1>>>(xmin);
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, cudaMemcpyDeviceToHost));
if (newsize || newshape)
realize_mod_cuda(dpar, dmod, type);
if (newspin) {
if (AF) realize_spin_cuda_af(dpar, dmod, ddat, dat->nsets);
else if (STREAMS) realize_spin_cuda_streams(dpar, dmod, ddat, dat->nsets);
else realize_spin_cuda(dpar, dmod, ddat, dat->nsets);
}
if ((newsize && vary_alb_size) || ((newshape ||
newspin) && vary_alb_shapespin))
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin)
&& vary_delcor0_shapespin))
realize_delcor_cuda(ddat, 0.0, 1, dat->nsets); /* set delcor0 to delcor0_save */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape)
&& vary_dopscale_sizeshape))
realize_dopscale_cuda(dpar, ddat, 1.0, 1); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the adjustments to 0th-order delay
* correction polynomial coefficients, to Doppler scaling fac-
* tors, and to radar and optical albedos */
if (AF)
vary_params_af(dpar,dmod,ddat, 11, &deldop_zmax,&rad_xsec,
&opt_brightness,&cos_subradarlat, dat->nsets);
else //11 - this used to be MPI_SETPAR_VARY
vary_params_cuda(dpar,dmod,ddat,11,&deldop_zmax,&rad_xsec,
&opt_brightness, &cos_subradarlat, dat->nsets);
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) &&
vary_alb_shapespin)) {
realize_photo_cuda(dpar, dmod, radalb_factor, optalb_factor, 2); /* reset R, then R_save */
/* Must update opt_brightness_save for Hapke optical scattering
* law, since single-scattering albedo w isn't just an overall
* scaling factor */
if (vary_hapke) {
if (AF)
vary_params_af(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
&opt_brightness_save,&dummyval4, dat->nsets);
else // used to be MPI_SETPAR_HAPKE
vary_params_cuda(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
&opt_brightness_save, &dummyval4, dat->nsets);
}
} else if (newphoto) {
rad_xsec_save = rad_xsec;
opt_brightness_save = opt_brightness;
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
}
if ((newsize && vary_delcor0_size) || ((newshape || newspin) &&
vary_delcor0_shapespin)) {
deldop_zmax_save = deldop_zmax;
realize_delcor_cuda(ddat, delta_delcor0, 2, dat->nsets); /* reset delcor0, then delcor0_save */
} else if (newdelcor) {
realize_delcor_cuda(ddat, 0.0, 0, dat->nsets); /* set delcor0_save to delcor0 */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) &&
vary_dopscale_sizeshape)) {
cos_subradarlat_save = cos_subradarlat;
realize_dopscale_cuda(dpar, ddat, dopscale_factor, 2); /* reset dopscale, then dopscale_save */
} else if (newdopscale) {
realize_dopscale_cuda(dpar, ddat, 1.0, 0); /* set dopscale_save to dopscale */
}
if (newxyoff)
realize_xyoff_cuda(ddat);
/* If the model extended beyond POS frame (sky rendering) for any
* trial parameter value(s), if it extended beyond any plane-of-
* sky fit frames, or if it was too wide in delay-Doppler space,
* evaluate model for best-fit parameter value to check if these
* problems persist - that is, to update "posbnd" "badposet" and
* "badradar" parameters for updated model.
* (This needn't be done for "baddiam" "badphoto" flags: if we've
* just finished adjusting an ellipsoid dimension or photometric
* parameter, realize_mod or realize_photo was called in code block
* above in order to realize the changed portion of model, and that
* call updated corresponding flag. Also we needn't worry about the
* "baddopscale" flag, since realize_dopscale was called above if
* Doppler scaling factors were changed.) The call to objective
* (*hotparam) first sets *hotparam (the parameter that we just
* adjusted) equal to itself (i.e., no change) and then calls
* calc_fits to evaluate the model for all datasets. */
if (check_posbnd || check_badposet || check_badradar)
objective_cuda(hotparamval);//(*hotparam);
/* Launch single-thread kernel to retrieve flags in dev_par */
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
/* Display the objective function after each parameter adjustment. */
printf("%4d %8.6f %d", p, enderr, iround(par->fpartype[p]));
if (flags[0]) printf(" (BAD DIAMS)");
if (flags[1]) printf(" (BAD PHOTO)");
if (flags[2]) printf(" (BAD POS)");
if (flags[3]) printf(" (BAD POSET)");
if (flags[4]) printf(" (BAD RADAR)");
if (flags[5]) printf(" (BAD DOPSCALE)");
printf("\n");
fflush(stdout);
/* Display reduced chi-square and individual penalty values after
* every 20th parameter adjustment. Setting showvals to 1 here
* means that these things will be displayed next time objective(x)
* is evaluated - at start of NEXT parameter adjustment. Specifi-
* cally, they will be displayed when routine mnbrak evaluates
* objective(x) for *unadjusted* parameter value ax (see comment
* above).
* Also rewrite model and obs files after every 20th parameter
* adjustment. Most of obs file doesn't change, but some floating
* parameters (i.e. delay correction polynomial coefficients) do. */
if (++cntr >= par->npar_update) {
cntr = 0;
showvals = 1;
if (AF) {
calc_fits_cuda_af(dpar, dmod, ddat);
chi2_cuda_af(dpar, ddat, 0, dat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(dpar, dmod, ddat);
chi2_cuda(dpar, ddat, 0);
}
//write_mod( par, mod);
//write_dat( par, dat);
}
}
/* End of this iteration: Write model and data to disk, and display the
* region within each delay-Doppler or Doppler frame for which model
* power is nonzero. */
if (cntr != 0) {
if (AF){
calc_fits_cuda_af(dpar, dmod, ddat);
chi2_cuda_af(dpar, ddat, 0, dat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(dpar, dmod, ddat);
chi2_cuda(dpar, ddat, 0);
}
//write_mod( par, mod);
//write_dat( par, dat);
}
show_deldoplim_cuda(dat, ddat);
/* Check if we should start a new iteration */
if (iter == par->term_maxiter) {
/* Just completed last iteration permitted by "term_maxiter" para-
* meter, so stop iterating; note that since iter is 1-based, this
* test is always false if "term_maxiter" = 0 (its default value) */
keep_iterating = 0;
} else if (first_fitpar > 0) {
/* Just completed partial iteration (possible for iteration 1): if
* "objfunc_start" parameter was given, check if fractional decrea-
* se in objective function *relative to objfunc_start* during the
* just-completed iteration was larger than term_prec, thus
* justifying a new iteration; if it wasn't specified, definitely
* proceed to a new iteration. */
if (par->objfunc_start > 0.0)
keep_iterating = ((par->objfunc_start - enderr)/enderr >= par->term_prec);
else
keep_iterating = 1;
first_fitpar = 0; /* for all iterations after the first iteration */
} else if (par->term_badmodel && (flags[0] || flags[1] || flags[2] ||
flags[3] || flags[4] || flags[5]) ) {
/* Just completed a full iteration, stop iterating because "term_
* badmodel" parameter is turned on and model has a fatal flaw: it
* extends beyond the POS frame OR it has one or more illegal photometric
* parameters OR it has one or more tiny or negative ellipsoid dia-
* meters OR it has plane-of-sky fit frames too small to "contain"
* model OR it is too wide in delay-Doppler space for (delay-)
* Doppler fit frames to be correctly constructed OR it has out-of-
* range values for one or more Doppler scaling factors */
keep_iterating = 0;
} else {
/* Just completed a full iteration and the model has no fatal flaws
* (or else the "term_badmodel" parameter is turned off): keep
* iterating if fractional decrease objective function during the
* just-completed iteration was greater than term_prec */
keep_iterating = ((beginerr - enderr)/enderr >= par->term_prec);
}
// } while (keep_iterating);
/* Show final values of reduced chi-square, individual penalty functions,
* and the objective function */
if (AF)
final_chi2 = chi2_cuda_af(dpar, ddat, 1, dat->nsets);
else if (STREAMS)
final_chi2 = chi2_cuda_streams(dpar, ddat, 1, dat->nsets);
else
final_chi2 = chi2_cuda(dpar, ddat, 1);
final_redchi2 = final_chi2/dat->dof;
printf("# search completed\n");
/* Launch single-thread kernel to get these final flags from dev->par:
* pen.n, baddiam, badphoto, posbnd, badposet, badradar, baddopscale */
/* Launch single-thread kernel to retrieve flags in dev_par */
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
deviceSyncAfterKernelLaunch("bf_get_flags_krnl");
if (par->pen.n > 0 || flags[0] || flags[1] || flags[2] || flags[3] ||
flags[4] || flags[5]) {
printf("#\n");
printf("# %15s %e\n", "reduced chi2", final_redchi2);
if (par->pen.n > 0) {
par->showstate = 1;
penalties_cuda(dpar, dmod, ddat);
par->showstate = 0;
}
if (flags[0])
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
if (flags[1])
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
if (flags[2])
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
if (flags[3])
printf("# objective func multiplied by %.1f: "
"model extends beyond plane-of-sky fit image\n",
badposet_factor);
if (flags[4])
printf("# objective func multiplied by %.1f: "
"model is too wide in delay-Doppler space to construct fit image\n",
badradar_factor);
if (flags[5])
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
printf("# ----------------------------\n");
printf("# %15s %e\n", "objective func", enderr);
printf("#\n");
}
intifpossible( dofstring, MAXLEN, dat->dof, SMALLVAL, "%f");
printf("# final chi2 = %e for %s dof (reduced chi2 = %f)\n",
final_chi2, dofstring, final_redchi2);
printf("#\n");
fflush(stdout);
cudaFree(sdev_par);
cudaFree(sdev_mod);
cudaFree(sdev_dat);
cudaFree(fparstep);
cudaFree(fpartol);
cudaFree(fparabstol);
cudaFree(fpartype);
cudaFree(fpntr);
cudaFree(flags);
cudaDeviceReset();
return enderr;
}
__host__ double bestfit_CUDA2(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, struct par_t *par, struct mod_t *mod,
struct dat_t *dat)
{
char hostname[MAXLEN], dofstring[MAXLEN];
int i, iter=0, p, cntr, first_fitpar, partype, keep_iterating=1, ilaw, nf, term_maxiter;
long pid_long;
pid_t pid;
double beginerr, enderr, ax, bx, cx, obja, objb, objc, xmin, final_chi2,
final_redchi2, dummyval2, dummyval3, dummyval4, delta_delcor0,
dopscale_factor, radalb_factor, optalb_factor, *hfparstep, *hfpartol,
*hfparabstol, objfunc_start, term_prec;
unsigned char *flags, *hflags, *htype, *dtype, action, avoid_badpos, term_badmodel;
int nsets, *nframes, *lc_n, *nviews, nfpar, *hfpartype, npar_update, max_frames=0;
struct vertices_t **verts;
dim3 THD, BLK;
/* This section collects parameters used for CUDA kernel launches throughout
* the program. The cudaStreams created here are used/re-used for the
* lifetime of one program run */
nsets = dat->nsets;
nfpar = par->nfpar;
nf = mod->shape.comp[0].real.nf;
action = par->action;
npar_update = par->npar_update;
avoid_badpos = par->avoid_badpos;
objfunc_start = par->objfunc_start;
term_prec = par->term_prec;
term_badmodel = par->term_badmodel;
type = mod->shape.comp[0].type;
htype = (unsigned char *) malloc(nsets*sizeof(unsigned char));
nframes = (int *) malloc(nsets*sizeof(int));
lc_n = (int *) malloc(nsets*sizeof(int));
nviews = (int *) malloc(nsets*sizeof(int));
gpuErrchk(cudaMalloc((void**)&dtype, sizeof(unsigned char)*nsets));
gpuErrchk(cudaMalloc((void**)&verts, sizeof(struct vertices_t)*2));
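/* Collect each dataset's type, frame count, view count, and (for lightcurves)
 * number of observed points; track the largest frame count so that one stream
 * per frame index can be created below and reused for the whole run. */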
for (int s=0; s<nsets; s++) {
htype[s] = dat->set[s].type;
switch (htype[s]) {
case DELAY:
nframes[s] = dat->set[s].desc.deldop.nframes;
nviews[s] = dat->set[s].desc.deldop.nviews;
lc_n[s] = 0;
break;
case DOPPLER:
nframes[s] = dat->set[s].desc.doppler.nframes;
nviews[s] = dat->set[s].desc.doppler.nviews;
lc_n[s] = 0;
break;
case POS:
nframes[s] = dat->set[s].desc.poset.nframes;
nviews[s] = dat->set[s].desc.poset.nviews;
lc_n[s] = 0;
break;
case LGHTCRV:
nframes[s] = dat->set[s].desc.lghtcrv.ncalc;
nviews[s] = dat->set[s].desc.lghtcrv.nviews;
lc_n[s] = dat->set[s].desc.lghtcrv.n;
break;
}
if (nframes[s]>max_frames) max_frames = nframes[s];
}
gpuErrchk(cudaMemcpy(dtype, htype, sizeof(unsigned char)*nsets,
cudaMemcpyHostToDevice));
cudaStream_t bf_stream[max_frames];
for (int f=0; f<max_frames; f++)
cudaStreamCreate(&bf_stream[f]);
/*..........................End section..................................*/
/* Get the hostname of host machine and the PID */
(void) gethostname(hostname, MAXLEN-1);
pid = getpid();
pid_long = (long) pid; /* Assumes pid_t fits in a long */
printf("#\n# CUDA fit (pid %ld on %s)\n", pid_long, hostname);
fflush(stdout);
/* Allocate memory for pointers, steps, and tolerances on both host and
* device. fpntr remains a cudaMallocManaged allocation because it is a
* double pointer. */
gpuErrchk(cudaMalloc((void**)&sdev_par, sizeof(struct par_t)));
gpuErrchk(cudaMemcpy(sdev_par, par, sizeof(struct par_t), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&sdev_mod, sizeof(struct mod_t)));
gpuErrchk(cudaMemcpy(sdev_mod, mod, sizeof(struct mod_t), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&sdev_dat, sizeof(struct dat_t)));
gpuErrchk(cudaMemcpy(sdev_dat, dat, sizeof(struct dat_t), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&flags, sizeof(unsigned char) * 7));
gpuErrchk(cudaMalloc((void**)&fparstep, sizeof(double) * nfpar));
gpuErrchk(cudaMalloc((void**)&fpartol, sizeof(double) * nfpar));
gpuErrchk(cudaMalloc((void**)&fparabstol, sizeof(double) * nfpar));
gpuErrchk(cudaMalloc((void**)&fpartype, sizeof(int) * nfpar));
cudaCalloc1((void**)&fpntr, sizeof(double*), nfpar);
hfparstep = (double *) malloc(nfpar*sizeof(double));
hfpartol = (double *) malloc(nfpar*sizeof(double));
hfparabstol = (double *) malloc(nfpar*sizeof(double));
hfpartype = (int *) malloc(nfpar*sizeof(int));
hflags = (unsigned char *) malloc(7*sizeof(unsigned char));
for (i=0; i<nfpar; i++)
gpuErrchk(cudaMalloc((void**)&fpntr[i], sizeof(double) * 1));
/* Set vertices shortcut */
set_verts_shortcut_krnl<<<1,1>>>(dmod, verts);
checkErrorAfterKernelLaunch("set_verts_shortcut_krnl");
/* Initialize static global pointers used by objective(x) below
to be compatible with "Numerical Recipes in C" routines */
spar = par; smod = mod; sdat = dat;
sdev_par = dpar; sdev_mod = dmod; sdev_dat = ddat;
/* Initialize static global parameters */
newsize = newshape = newspin = newphoto = newdelcor = newdopscale = newxyoff = 1;
deldop_zmax = deldop_zmax_save = 0.0;
cos_subradarlat = cos_subradarlat_save = 0.0;
rad_xsec = rad_xsec_save = 0.0;
opt_brightness = opt_brightness_save = 0.0;
vary_delcor0_size = (par->vary_delcor0 != VARY_NONE);
vary_delcor0_shapespin = (par->vary_delcor0 == VARY_ALL);
vary_dopscale_spin = (par->vary_dopscale != VARY_NONE);
vary_dopscale_sizeshape = (par->vary_dopscale == VARY_ALL);
vary_alb_size = (par->vary_radalb != VARY_NONE || par->vary_optalb != VARY_NONE);
vary_alb_shapespin = (par->vary_radalb == VARY_ALL || par->vary_optalb == VARY_ALL);
vary_hapke = 0;
if (par->vary_optalb != VARY_NONE)
for (ilaw=0; ilaw<mod->photo.noptlaws; ilaw++)
if (mod->photo.opttype[ilaw] == HAPKE || mod->photo.opttype[ilaw] == HARMHAPKE
|| mod->photo.opttype[ilaw] == INHOHAPKE)
vary_hapke = 1;
call_vary_params = (par->vary_delcor0 != VARY_NONE || par->vary_dopscale != VARY_NONE
|| par->vary_radalb != VARY_NONE
|| par->vary_optalb != VARY_NONE);
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* The following call sets up the parameter lists allocated above and copy
* the device contents to host copies */
mkparlist_cuda2(dpar, dmod, ddat, fparstep, fpartol, fparabstol, fpartype,
fpntr, nfpar, nsets);
gpuErrchk(cudaMemcpy(hfparstep, fparstep, sizeof(double)*nfpar, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(hfpartol, fpartol, sizeof(double)*nfpar, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(hfparabstol, fparabstol, sizeof(double)*nfpar, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(hfpartype, fpartype, sizeof(int)*nfpar, cudaMemcpyDeviceToHost));
/* Compute deldop_zmax_save, cos_subradarlat_save, rad_xsec_save, and
* opt_brightness_save for the initial model */
if (call_vary_params)
{
realize_mod_cuda(dpar, dmod, type);
if (AF) {
realize_spin_cuda_af(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_af(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, nsets);
}
else if (STREAMS) {
realize_spin_cuda_streams(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_cuda_streams2(dpar, dmod, ddat, action,
&deldop_zmax_save, &rad_xsec_save, &opt_brightness_save,
&cos_subradarlat_save, nsets);
}
else if (STREAMS2){
if (FLOAT)
realize_spin_cuda_streams2f(dpar, dmod, ddat, htype, nframes,
nviews, nsets, bf_stream);
else
realize_spin_cuda_streams2(dpar, dmod, ddat, htype, nframes, nviews,
nsets, bf_stream);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
if (FLOAT)
vary_params_cuda_streams3f(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save, &cos_subradarlat_save,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
else
vary_params_cuda_streams3(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save, &opt_brightness_save, &cos_subradarlat_save,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
}
else {
realize_spin_cuda(dpar, dmod, ddat, nsets);
realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
vary_params_cuda(dpar, dmod, ddat, action, &deldop_zmax_save,
&rad_xsec_save,&opt_brightness_save,&cos_subradarlat_save,
nsets);
}
}
printf("rad_xsec: %f\n", rad_xsec_save);
printf("deldop_zmax: %f\n", (float)deldop_zmax_save);
/* Point hotparam to a dummy variable (dummyval) rather than to a model pa-
* rameter; then call objective(0.0) to set dummy variable = 0.0, realize
* the initial model, calculate the fits, return initial model's objective
* function as enderr. */
bf_set_hotparam_initial_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_set_hotparam_initial_krnl");
if (STREAMS2)
enderr = objective_cuda_streams(0.0, verts, htype, dtype, nframes,
nviews, lc_n, nsets, nf, bf_stream);
else
enderr = objective_cuda(0.0);
printf("#\n# searching for best fit ...\n");
printf("%4d %8.6f to begin", 0, enderr);
/* Launch single-thread kernel to retrieve flags in dev_par */
/* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;*/
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
cudaMemcpyDeviceToHost));
/* Now act on the flags just retrieved from dev_par */
if (hflags[0]) printf(" (BAD DIAMS)");
if (hflags[1]) printf(" (BAD PHOTO)");
if (hflags[2]) printf(" (BAD POS)");
if (hflags[3]) printf(" (BAD POSET)");
if (hflags[4]) printf(" (BAD RADAR)");
if (hflags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Display the region within each delay-Doppler or Doppler frame that, ac-
* cording to initial model, has nonzero power. A warning is displayed if
* any region extends beyond the data limits: the vignetting is too tight,
* or else some model parameter (such as a delay correction polynomial co-
* efficient) is seriously in error. */
show_deldoplim_cuda_streams(ddat, htype, nsets, nframes, max_frames);
/* Set the starting fit parameter for the first iteration only */
first_fitpar = par->first_fitpar;
term_maxiter = par->term_maxiter;
if (first_fitpar < 0 || first_fitpar >= nfpar) {
printf("ERROR: need 0 <= first_fitpar < nparams (%d)\n", nfpar);
bailout("bestfit.c\n");
}
/* Iteratively adjust model; for each iteration, step through all free pa-
* rameters, adjusting one parameter at a time so as to minimize the objec-
* tive function at each step. Stop when fractional decrease in the objec-
* tive function from one iteration to the next is less than term_prec. */
// do {
showvals = 1; /* show reduced chi-square and penalties at beginning */
beginerr = enderr;
printf("# iteration %d %f", ++iter, beginerr);
/* Launch single-thread kernel to retrieve flags in dev_par */
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
cudaMemcpyDeviceToHost));
/* Now act on the flags just retrieved from dev_par */
if (hflags[0]) printf(" (BAD DIAMS)");
if (hflags[1]) printf(" (BAD PHOTO)");
if (hflags[2]) printf(" (BAD POS)");
if (hflags[3]) printf(" (BAD POSET)");
if (hflags[4]) printf(" (BAD RADAR)");
if (hflags[5]) printf(" (BAD DOPSCALE)"); printf("\n");
fflush(stdout);
/* Show breakdown of chi-square by data type */
if (AF)
chi2_cuda_af(dpar, ddat, 1, nsets);
else if (STREAMS)
chi2_cuda_streams(dpar, ddat, 1, nsets);
else if (STREAMS2)
chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes, lc_n, 1,
nsets, bf_stream);
else
chi2_cuda(dpar, ddat, 1);
/* Loop through the free parameters */
cntr = first_fitpar % npar_update;
p = first_fitpar = 1;
//for (p=first_fitpar; p<nfpar; p++) {
//p = first_fitpar;
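		/* NOTE: the free-parameter loop above is commented out for debugging,
		 * so only the single parameter p = first_fitpar = 1 is adjusted in
		 * this pass (see the hard-coded assignment a few lines up). */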
/* Adjust only parameter p on this try */
bf_set_hotparam_pntr_krnl<<<1,1>>>(fpntr, fpartype, p);
checkErrorAfterKernelLaunch("bf_set_hotparam_pntr_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&partype, bf_partype, sizeof(int),
0, cudaMemcpyDeviceToHost));
newsize = newshape = newspin = newphoto = newdelcor = newdopscale
= newxyoff = 0;
if (partype == SIZEPAR) newsize = 1;
else if (partype == SHAPEPAR) newshape = 1;
else if (partype == SPINPAR) newspin = 1;
else if (partype == PHOTOPAR) newphoto = 1;
else if (partype == DELCORPAR) newdelcor = 1;
else if (partype == DOPSCALEPAR) newdopscale = 1;
else if (partype == XYOFFPAR) newxyoff = 1;
/* If this is a size parameter AND model extends beyond POS frame
* AND the "avoid_badpos" parameter is turned on, shrink model by
* 5% at a time until it fits within the POS frame.
* We must start with the redundant model evaluation for the un-
* changed value of the size parameter, in case the first call to
* objective displays reduced chi-square and the penalty functions. */
if (avoid_badpos && partype == SIZEPAR) {
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
cudaMemcpyDeviceToHost));
/* Get value of (*hotparam) */
bf_get_hotparam_val_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, cudaMemcpyDeviceToHost));
while (hflags[2]) {
if (STREAMS2)
objective_cuda_streams(hotparamval, verts, htype, dtype,
nframes, nviews, lc_n, nsets, nf, bf_stream);
else
objective_cuda(hotparamval);
bf_get_flags_krnl<<<1,1>>>(dpar, flags);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
cudaMemcpyDeviceToHost));
if (hflags[2]) {
/* Set the value pointed to by hotparam to 0.95 of its
* previous value */
bf_mult_hotparam_val_krnl<<<1,1>>>(0.95);
checkErrorAfterKernelLaunch("bf_mult_hotparam_val_krnl");
}
}
}
/* Get value of (*hotparam) so that mnbrak can use it*/
bf_get_hotparam_val_krnl<<<1,1>>>();
checkErrorAfterKernelLaunch("bf_get_hotparam_val_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
sizeof(double), 0, cudaMemcpyDeviceToHost));
/* Use Numerical Recipes routine mnbrak to bracket a minimum in the
* objective function (reduced chi-square plus penalties) objec-
* tive(x), where x is the value of parameter p. As initial trial
* parameter values, use ax (unadjusted value) and bx, that value
* incremented by the appropriate step size (length_step,spin_step,
* etc.). mnbrak returns 3 parameter values, with bx between ax
* and cx; note that ax and bx are changed from their input values.
* It also returns the 3 corresponding objective(x) values, where
* objb is less than obja and objc. Hence there is at least one
* local minimum (but not necessarily *any* global minimum)
* somewhere between ax and cx. */
ax = hotparamval;
			bx = ax + hfparstep[p]; /* par usage is fine here */
if (STREAMS2)
mnbrak_streams(&ax, &bx, &cx, &obja, &objb, &objc,
objective_cuda_streams, verts, htype, dtype, nframes,
nviews, lc_n, nsets, nf, bf_stream);
else
mnbrak( &ax, &bx, &cx, &obja, &objb, &objc, objective_cuda);
/* Before homing in on local minimum, initialize flags that will
* tell us if model extended beyond POS frame (sky rendering) for
* any trial parameter value(s), if it extended beyond any POS ima-
* ges, and if it was too wide in delay-Doppler space */
check_posbnd = 0;
check_badposet = 0;
check_badradar = 0;
/* Now use Numerical Recipes function brent to find local minimum -
* that is, to find xmin, the best value of x, to within the
* *fractional* tolerance specified for parameter p (length_tol,
* spin_tol, etc.). brent's return value is the minimized objective
* function, objective(xmin). If more than one local minimum bet-
* ween ax and cx, brent might not find the best one. brent_abs is
* a modified version of brent that has an absolute fitting tole-
* rance as one of its arguments, in addition to the existing
* fractional tolerance. */
if (STREAMS2)
enderr = brent_abs_streams(ax, bx, cx, objective_cuda_streams, hfpartol[p],
hfparabstol[p], &xmin, verts, htype, dtype, nframes, nviews, lc_n,
nsets, nf, bf_stream);
else
enderr = brent_abs( ax, bx, cx, objective_cuda,
					hfpartol[p], hfparabstol[p], &xmin);	/* use the host copies of the tolerances */
//
// /* Realize whichever part(s) of the model has changed.
// *
// * The code here is somewhat opaque because more than one part of
// * the model may have changed - if the "vary_delcor0" "vary_radalb"
// * and/or "vary_optalb" parameter is being used to permit joint pa-
// * rameter adjustments. Before calling the vary_params routine, the
// * size/shape and spin states must be realized (realize_mod and
// * realize_spin); if albedos are being varied jointly with other
// * parameters, the photometric state must also be realized
// * (realize_photo); and in either case the 0th-order delay correc-
// * tion polynomial coefficients must be reset to their saved
// * values via the appropriate call to realize_delcor. */
// /* Set the value pointed to by hotparam to 0.95 of its
// * previous value (*hotparam) = xmin; */
// bf_set_hotparam_val_krnl<<<1,1>>>(xmin);
// checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl");
// gpuErrchk(cudaMemcpyFromSymbol(&hotparamval, bf_hotparamval,
// sizeof(double), 0, cudaMemcpyDeviceToHost));
//
// if (newsize || newshape)
// realize_mod_cuda(dpar, dmod, type);
// if (newspin) {
// if (AF)
// realize_spin_cuda_af(dpar, dmod, ddat, nsets);
// else if (STREAMS)
// realize_spin_cuda_streams(dpar, dmod, ddat, nsets);
// else if (STREAMS2)
// realize_spin_cuda_streams2(dpar, dmod, ddat, htype, nframes,
// nviews, nsets, bf_stream);
// else
// realize_spin_cuda(dpar, dmod, ddat, nsets);
// }
// if ((newsize && vary_alb_size) || ((newshape ||
// newspin) && vary_alb_shapespin))
// realize_photo_cuda(dpar, dmod, 1.0, 1.0, 1); /* set R to R_save */
// if ((newsize && vary_delcor0_size) || ((newshape || newspin)
// && vary_delcor0_shapespin)) {
// if (FLOAT)
// realize_delcor_cuda_f(ddat, 0.0, 1, nsets, htype, nframes); /* set delcor0 to delcor0_save */
// else
// realize_delcor_cuda(ddat, 0.0, 1, nsets); /* set delcor0 to delcor0_save */
// }
// if ((newspin && vary_dopscale_spin) || ((newsize || newshape)
// && vary_dopscale_sizeshape))
// realize_dopscale_cuda_streams(dpar, ddat, 1.0, 1, nsets, dtype); /* set dopscale to dopscale_save */
// if (call_vary_params) {
// /* Call vary_params to get the adjustments to 0th-order delay
// * correction polynomial coefficients, to Doppler scaling fac-
// * tors, and to radar and optical albedos */
// if (AF)
// vary_params_af(dpar,dmod,ddat, 11, &deldop_zmax,&rad_xsec,
// &opt_brightness,&cos_subradarlat, nsets);
// if (STREAMS)
// vary_params_cuda_streams2(dpar,dmod,ddat,11, &deldop_zmax,
// &rad_xsec,&opt_brightness,&cos_subradarlat,nsets);
// else if (STREAMS2)
// vary_params_cuda_streams3(dpar,dmod,ddat,11,&deldop_zmax,
// &rad_xsec, &opt_brightness, &cos_subradarlat,
// nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
// bf_stream);
// else //11 - this used to be MPI_SETPAR_VARY
// vary_params_cuda(dpar,dmod,ddat,11,&deldop_zmax,&rad_xsec,
// &opt_brightness, &cos_subradarlat, nsets);
//
// delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
// if (cos_subradarlat != 0.0)
// dopscale_factor = cos_subradarlat_save/cos_subradarlat;
// if (rad_xsec != 0.0)
// radalb_factor = rad_xsec_save/rad_xsec;
// if (opt_brightness != 0.0)
// optalb_factor = opt_brightness_save/opt_brightness;
// }
// if ((newsize && vary_alb_size) || ((newshape || newspin) &&
// vary_alb_shapespin)) {
// realize_photo_cuda(dpar, dmod, radalb_factor, optalb_factor, 2); /* reset R, then R_save */
//
// /* Must update opt_brightness_save for Hapke optical scattering
// * law, since single-scattering albedo w isn't just an overall
// * scaling factor */
// if (vary_hapke) {
// if (AF)
// vary_params_af(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
// &opt_brightness_save,&dummyval4, nsets);
// if (STREAMS)
// vary_params_cuda_streams2(dpar,dmod,ddat,12, &dummyval2,
// &dummyval3,&opt_brightness,&dummyval4,nsets);
// else if (STREAMS2)
// vary_params_cuda_streams3(dpar,dmod,ddat,12,&dummyval2,
// &dummyval3,&opt_brightness,&dummyval4,
// nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
// bf_stream);
// else // used to be MPI_SETPAR_HAPKE
// vary_params_cuda(dpar,dmod,ddat,12,&dummyval2,&dummyval3,
// &opt_brightness_save, &dummyval4, nsets);
// }
// } else if (newphoto) {
// rad_xsec_save = rad_xsec;
// opt_brightness_save = opt_brightness;
// realize_photo_cuda(dpar, dmod, 1.0, 1.0, 0); /* set R_save to R */
// }
// if ((newsize && vary_delcor0_size) || ((newshape || newspin) &&
// vary_delcor0_shapespin)) {
// deldop_zmax_save = deldop_zmax;
// if (FLOAT)
// realize_delcor_cuda_f(ddat, delta_delcor0, 2, nsets, htype, nframes); /* reset delcor0, then delcor0_save */
// else
// realize_delcor_cuda(ddat, delta_delcor0, 2, nsets); /* reset delcor0, then delcor0_save */
// } else if (newdelcor) {
// if (FLOAT)
// realize_delcor_cuda_f(ddat, 0.0, 0, nsets, htype, nframes); /* set delcor0_save to delcor0 */
// else
// realize_delcor_cuda(ddat, 0.0, 0, nsets); /* set delcor0_save to delcor0 */
// }
// if ((newspin && vary_dopscale_spin) || ((newsize || newshape) &&
// vary_dopscale_sizeshape)) {
// cos_subradarlat_save = cos_subradarlat;
// realize_dopscale_cuda_streams(dpar, ddat, dopscale_factor, 2, nsets, dtype); /* reset dopscale, then dopscale_save */
// } else if (newdopscale) {
// realize_dopscale_cuda_streams(dpar, ddat, 1.0, 0, nsets, dtype); /* set dopscale_save to dopscale */
// }
// if (newxyoff)
// realize_xyoff_cuda_streams(ddat, nsets, dtype);
//
// /* If the model extended beyond POS frame (sky rendering) for any
// * trial parameter value(s), if it extended beyond any plane-of-
// * sky fit frames, or if it was too wide in delay-Doppler space,
// * evaluate model for best-fit parameter value to check if these
// * problems persist - that is, to update "posbnd" "badposet" and
// * "badradar" parameters for updated model.
// * (This needn't be done for "baddiam" "badphoto" flags: if we've
// * just finished adjusting an ellipsoid dimension or photometric
// * parameter, realize_mod or realize_photo was called in code block
// * above in order to realize the changed portion of model, and that
// * call updated corresponding flag. Also we needn't worry about the
// * "baddopscale" flag, since realize_dopscale was called above if
// * Doppler scaling factors were changed.) The call to objective
// * (*hotparam) first sets *hotparam (the parameter that we just
// * adjusted) equal to itself (i.e., no change) and then calls
// * calc_fits to evaluate the model for all datasets. */
// if (check_posbnd || check_badposet || check_badradar) {
// if (STREAMS2)
// objective_cuda_streams(hotparamval, verts, htype, dtype,
// nframes, nviews, lc_n, nsets, nf, bf_stream);
// else
// objective_cuda(hotparamval);//(*hotparam);
// }
//
// /* Launch single-thread kernel to retrieve flags in dev_par */
// bf_get_flags_krnl<<<1,1>>>(dpar, flags);
// checkErrorAfterKernelLaunch("bf_get_flags_krnl");
// gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
// cudaMemcpyDeviceToHost));
// /* Display the objective function after each parameter adjustment. */
// printf("%4d %8.6f %d", p, enderr, iround(par->fpartype[p]));
// if (hflags[0]) printf(" (BAD DIAMS)");
// if (hflags[1]) printf(" (BAD PHOTO)");
// if (hflags[2]) printf(" (BAD POS)");
// if (hflags[3]) printf(" (BAD POSET)");
// if (hflags[4]) printf(" (BAD RADAR)");
// if (hflags[5]) printf(" (BAD DOPSCALE)");
// printf("\n");
// fflush(stdout);
//
// /* Display reduced chi-square and individual penalty values after
// * every 20th parameter adjustment. Setting showvals to 1 here
// * means that these things will be displayed next time objective(x)
// * is evaluated - at start of NEXT parameter adjustment. Specifi-
// * cally, they will be displayed when routine mnbrak evaluates
// * objective(x) for *unadjusted* parameter value ax (see comment
// * above).
// * Also rewrite model and obs files after every 20th parameter
// * adjustment. Most of obs file doesn't change, but some floating
// * parameters (i.e. delay correction polynomial coefficients) do. */
// if (++cntr >= npar_update) {
// cntr = 0;
// showvals = 1;
// if (AF) {
// calc_fits_cuda_af(dpar, dmod, ddat);
// chi2_cuda_af(dpar, ddat, 0, nsets);
// }
// else if (STREAMS) {
// calc_fits_cuda_streams(dpar, dmod, ddat);
// chi2_cuda_streams(dpar, ddat, 0, nsets);
// }
// else if (STREAMS2) {
// calc_fits_cuda_streams2(dpar, dmod, ddat, verts, nviews,
// nframes, lc_n, htype, nsets, nf, bf_stream);
// chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 0, nsets, bf_stream);
// }
// else {
// calc_fits_cuda(dpar, dmod, ddat);
// chi2_cuda(dpar, ddat, 0);
// }
// //write_mod( par, mod);
// //write_dat( par, dat);
// }
// }
//
// /* End of this iteration: Write model and data to disk, and display the
// * region within each delay-Doppler or Doppler frame for which model
// * power is nonzero. */
// if (cntr != 0) {
// if (AF){
// calc_fits_cuda_af(dpar, dmod, ddat);
// chi2_cuda_af(dpar, ddat, 0, nsets);
// }
// else if (STREAMS) {
// calc_fits_cuda_streams(dpar, dmod, ddat);
// chi2_cuda_streams(dpar, ddat, 0, nsets);
// }
// else if (STREAMS2) {
// calc_fits_cuda_streams2(dpar, dmod, ddat, verts, nviews,
// nframes, lc_n, htype, nsets, nf, bf_stream);
// chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 0, nsets, bf_stream);
// }
// else {
// calc_fits_cuda(dpar, dmod, ddat);
// chi2_cuda(dpar, ddat, 0);
// }
// //write_mod( par, mod);
// //write_dat( par, dat);
// }
// show_deldoplim_cuda_streams(ddat, htype, nsets, nframes, max_frames);
//
// /* Check if we should start a new iteration */
// if (iter == term_maxiter) {
// /* Just completed last iteration permitted by "term_maxiter" para-
// * meter, so stop iterating; note that since iter is 1-based, this
// * test is always false if "term_maxiter" = 0 (its default value) */
// keep_iterating = 0;
//
// } else if (first_fitpar > 0) {
// /* Just completed partial iteration (possible for iteration 1): if
// * "objfunc_start" parameter was given, check if fractional decrea-
// * se in objective function *relative to objfunc_start* during the
// * just-completed iteration was larger than term_prec, thus
// * justifying a new iteration; if it wasn't specified, definitely
// * proceed to a new iteration. */
// if (objfunc_start > 0.0)
// keep_iterating = ((objfunc_start - enderr)/enderr >= term_prec);
// else
// keep_iterating = 1;
// first_fitpar = 0; /* for all iterations after the first iteration */
//
// } else if (term_badmodel && (hflags[0] || hflags[1] || hflags[2] ||
// hflags[3] || hflags[4] || hflags[5]) ) {
//
// /* Just completed a full iteration, stop iterating because "term_
// * badmodel" parameter is turned on and model has a fatal flaw: it
//			 * extends beyond POS frame OR it has one or more illegal photometric
// * parameters OR it has one or more tiny or negative ellipsoid dia-
// * meters OR it has plane-of-sky fit frames too small to "contain"
// * model OR it is too wide in delay-Doppler space for (delay-)
// * Doppler fit frames to be correctly constructed OR it has out-of-
// * range values for one or more Doppler scaling factors */
// keep_iterating = 0;
//
// } else {
// /* Just completed a full iteration and the model has no fatal flaws
// * (or else the "term_badmodel" parameter is turned off): keep
// * iterating if fractional decrease objective function during the
// * just-completed iteration was greater than term_prec */
// keep_iterating = ((beginerr - enderr)/enderr >= term_prec);
// }
//
// } while (keep_iterating);
//
// /* Show final values of reduced chi-square, individual penalty functions,
// * and the objective function */
// if (AF)
// final_chi2 = chi2_cuda_af(dpar, ddat, 1, nsets);
// else if (STREAMS)
// final_chi2 = chi2_cuda_streams(dpar, ddat, 1, nsets);
// else if (STREAMS2)
// final_chi2 = chi2_cuda_streams2(dpar, ddat, htype, dtype, nframes,
// lc_n, 1, nsets, bf_stream);
// else
// final_chi2 = chi2_cuda(dpar, ddat, 1);
// final_redchi2 = final_chi2/dat->dof;
// printf("# search completed\n");
//
// /* Launch single-thread kernel to get these final flags from dev->par:
// * pen.n, baddiam, badphoto, posbnd, badposet, badradar, baddopscale */
// /* Launch single-thread kernel to retrieve flags in dev_par */
// bf_get_flags_krnl<<<1,1>>>(dpar, flags);
// checkErrorAfterKernelLaunch("bf_get_flags_krnl");
// gpuErrchk(cudaMemcpy(hflags, flags, sizeof(unsigned char)*7,
// cudaMemcpyDeviceToHost));
//
// if (par->pen.n > 0 || hflags[0] || hflags[1] || hflags[2] || hflags[3] ||
// hflags[4] || hflags[5]) {
// printf("#\n");
// printf("# %15s %e\n", "reduced chi2", final_redchi2);
// if (par->pen.n > 0) {
// par->showstate = 1;
// penalties_cuda(dpar, dmod, ddat);
// par->showstate = 0;
// }
// if (hflags[0])
// printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
// baddiam_factor);
// if (hflags[1])
// printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
// badphoto_factor);
// if (hflags[2])
// printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
// posbnd_factor);
// if (hflags[3])
// printf("# objective func multiplied by %.1f: "
// "model extends beyond plane-of-sky fit image\n",
// badposet_factor);
// if (hflags[4])
// printf("# objective func multiplied by %.1f: "
// "model is too wide in delay-Doppler space to construct fit image\n",
// badradar_factor);
// if (hflags[5])
// printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
// baddopscale_factor);
// printf("# ----------------------------\n");
// printf("# %15s %e\n", "objective func", enderr);
// printf("#\n");
// }
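	/* NOTE: with the iteration loop and the final chi-square block above
	 * commented out, final_chi2 and final_redchi2 appear never to be assigned
	 * before the summary printf below, so the values printed there are
	 * whatever the variables held at declaration. */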
intifpossible( dofstring, MAXLEN, dat->dof, SMALLVAL, "%f");
printf("# final chi2 = %e for %s dof (reduced chi2 = %f)\n",
final_chi2, dofstring, final_redchi2);
printf("#\n");
fflush(stdout);
/* Destroy the streams */
for (int f=0; f<max_frames; f++)
cudaStreamDestroy(bf_stream[f]);
//free(hflags);
free(htype);
free(nframes);
free(lc_n);
free(nviews);
//free(hfparstep);
// free(hfpartol);
// free(hfparabstol);
// free(fpartype);
cudaFree(sdev_par);
cudaFree(sdev_mod);
cudaFree(sdev_dat);
cudaFree(fparstep);
cudaFree(fpartol);
cudaFree(fparabstol);
cudaFree(fpartype);
cudaFree(fpntr);
cudaFree(flags);
cudaFree(dtype);
cudaFree(verts);
cudaDeviceReset();
return enderr;
}
/* objective(x) is the objective function, with x the value of the one
model parameter that is being adjusted at the moment by bestfit.
Other parameters on which objective depends must be placed in static
variables at the top of this file, for compatibility with Numerical
Recipes routines mnbrak and brent (which search for minima of a
function of *one* variable).
objective(x) also displays reduced chi-square and the individual
penalty values if bestfit has set showvals = 1. It then resets
showvals to 0 after displaying these quantities. */
__host__ double objective_cuda( double x)
{
double err, pens, delta_delcor0, dopscale_factor, radalb_factor,
optalb_factor;
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* Assign new trial value to the model parameter being adjusted */
bf_set_hotparam_val_krnl<<<1,1>>>(x); //(*hotparam) = x;
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl (in objective_cuda)");
/* Realize whichever part(s) of the model have changed, then calculate root's
* contribution to chi-square.
* The code here is somewhat opaque because more than one part of the model
* may have changed - if the "vary_delcor0" "vary_dopscale" "vary_radalb" and
* /or "vary_optalb" parameter is being used to permit joint parameter ad-
* justments. Before calling the vary_params routine, the size/shape and spin
* states must be realized (realize_mod and realize_spin); if albedos are
* being varied jointly with other parameters, the photometric state must
* also be realized (realize_photo); and in either case the 0th-order delay
* correction polynomial coefficients and the Doppler scaling factors must be
* reset to their saved values via the appropriate calls to realize_delcor
* and realize_dopscale, respectively.*/
if (newsize || newshape)
realize_mod_cuda(sdev_par, sdev_mod, type);
if (newspin) {
if (AF)
realize_spin_cuda_af(sdev_par, sdev_mod, sdev_dat, sdat->nsets);
else if (STREAMS)
realize_spin_cuda_streams(sdev_par, sdev_mod, sdev_dat, sdat->nsets);
else
realize_spin_cuda(sdev_par, sdev_mod, sdev_dat, sdat->nsets); }
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin))
realize_delcor_cuda(sdev_dat, 0.0, 1, sdat->nsets); /* set delcor0 to delcor0_save */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda(sdev_par, sdev_dat, 1.0, 1); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the trial adjustments to 0th-order delay correc-
* tion polynomial coefficients, to Doppler scaling factors,and to radar
* and optical albedos, then send them to the branch nodes */
if (AF)
vary_params_af(sdev_par,sdev_mod,sdev_dat,spar->action,
&deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
sdat->nsets);
else if (STREAMS)
// vary_params_cuda_streams(sdev_par, sdev_mod, sdev_dat, spar->action,
// &deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
// sdat->nsets);
vary_params_cuda_streams2(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
sdat->nsets);
else
vary_params_cuda(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness,
&cos_subradarlat, sdat->nsets);
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, radalb_factor, optalb_factor, 1); /* adjust R */
else if (newphoto)
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 0); /* set R_save to R */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin))
realize_delcor_cuda(sdev_dat, delta_delcor0, 1, sdat->nsets); /* adjust delcor0 */
else if (newdelcor)
realize_delcor_cuda(sdev_dat, 0.0, 0, sdat->nsets); /* set delcor0_save to delcor0 */
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda(sdev_par, sdev_dat, dopscale_factor, 1); /* adjust dopscale */
else if (newdopscale)
realize_dopscale_cuda(sdev_par, sdev_dat, 1.0, 0); /* set dopscale_save to dopscale */
if (newxyoff)
realize_xyoff_cuda(sdev_dat);
if (AF) {
calc_fits_cuda_af(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_af(sdev_par, sdev_dat, 0, sdat->nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_streams(sdev_par, sdev_dat, 0, sdat->nsets);
}
else {
calc_fits_cuda(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda(sdev_par, sdev_dat, 0);
}
/* Divide chi-square by DOF to get reduced chi-square. */
err /= sdat->dof;
/* If bestfit has set showvals = 1, display reduced chi-square. Then set
* spar->showstate = 1, so that when function penalties is called later,
* it "knows" that it should display the individual penalty values.
* Reset showstate to 0 if showvals = 0. */
if (showvals) {
printf("# %15s %e\n", "reduced chi2", err);
spar->showstate = 1;
}
else
spar->showstate = 0;
/* Compute penalties and add to reduced chi-square. Individual penalty values
* will be displayed if we set spar->showstate = 1 a few lines back. */
pens = penalties_cuda(sdev_par, sdev_mod, sdev_dat);
err += pens;
/* Double the objective function if there's an ellipsoid component with tiny
* or negative diameter, if any optical photometric parameters have invalid
* values, if any portion of the model lies outside specified POS window or
* outside any plane-of-sky fit image, or if model is too wide in delay-Dopp-
* ler space for any (delay-)Doppler fit image to be correctly constructed.
* This effectively rules out any models with any of these flaws. */
/* NOTE: TO-DO: baddiam may need to come from elsewhere other than spar.
* However, bestfit gets called only once and spar/smod/sdat gets copied
* only once. */
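	/* Worked example (assuming the default bad_objfactor of 2.0 implied by the
	 * "Double the objective function" comment above): with baddiam_logfactor
	 * = 0.0, a model flagged for bad diameters gets err *= 2.0 * exp(0.0),
	 * i.e. its objective function is simply doubled. */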
if (spar->baddiam) {
baddiam_factor = spar->bad_objfactor * exp(spar->baddiam_logfactor);
err *= baddiam_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
}
if (spar->badphoto) {
badphoto_factor = spar->bad_objfactor * exp(spar->badphoto_logfactor);
err *= badphoto_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
}
if (spar->posbnd) {
check_posbnd = 1; /* tells bestfit about this problem */
posbnd_factor = spar->bad_objfactor * exp(spar->posbnd_logfactor);
err *= posbnd_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
}
if (spar->badposet) {
check_badposet = 1; /* tells bestfit about this problem */
badposet_factor = spar->bad_objfactor * exp(spar->badposet_logfactor);
err *= badposet_factor;
if (showvals)
printf("# objective func multiplied by %.1f: plane-of-sky fit frame too small\n",
badposet_factor);
}
if (spar->badradar) {
check_badradar = 1; /* tells bestfit about this problem */
badradar_factor = spar->bad_objfactor * exp(spar->badradar_logfactor);
err *= badradar_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model too wide in delay-Doppler space\n",
badradar_factor);
}
if (spar->baddopscale) {
baddopscale_factor = spar->bad_objfactor * exp(spar->baddopscale_logfactor);
err *= baddopscale_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
}
/* Reset showvals to 0 if it had been 1 (i.e., turn off display of reduced
* chi-square and the individual penalty values). */
if (showvals)
fflush( stdout);
showvals = 0;
return err;
}
/* objective_cuda_streams is a version of objective_cuda that takes an extra
* argument - the cudaStreams created in bestfit_cuda2. The goal is to
* reduce overhead from stream creation/destruction to a minimum by having
* just one set number of streams per program run. */
__host__ double objective_cuda_streams(
double x,
struct vertices_t **verts,
unsigned char *htype,
unsigned char *dtype,
int *nframes,
int *nviews,
int *lc_n,
int nsets,
int nf,
cudaStream_t *bf_stream)
{
double err, pens, delta_delcor0, dopscale_factor, radalb_factor,
optalb_factor, *dlogfactors, *hlogfactors;
unsigned char *dflags, *hflags;
gpuErrchk(cudaMalloc((void**)&dflags, sizeof(unsigned char)*7));
gpuErrchk(cudaMalloc((void**)&dlogfactors, sizeof(double)*7));
hflags = (unsigned char *) malloc(7*sizeof(unsigned char));
hlogfactors = (double *) malloc(7*sizeof(double));
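	/* dflags/dlogfactors receive the six badness flags and the seven penalty
	 * scale factors written by ocs_get_flags_krnl below; hflags/hlogfactors
	 * are their host-side copies. */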
/* Initialize local parameters */
delta_delcor0 = 0.0;
dopscale_factor = radalb_factor = optalb_factor = 1.0;
/* Assign new trial value to the model parameter being adjusted */
bf_set_hotparam_val_krnl<<<1,1>>>(x); //(*hotparam) = x;
checkErrorAfterKernelLaunch("bf_set_hotparam_val_krnl (in objective_cuda)");
/* Realize whichever part(s) of the model have changed, then calculate root's
* contribution to chi-square.
* The code here is somewhat opaque because more than one part of the model
* may have changed - if the "vary_delcor0" "vary_dopscale" "vary_radalb" and
* /or "vary_optalb" parameter is being used to permit joint parameter ad-
* justments. Before calling the vary_params routine, the size/shape and spin
* states must be realized (realize_mod and realize_spin); if albedos are
* being varied jointly with other parameters, the photometric state must
* also be realized (realize_photo); and in either case the 0th-order delay
* correction polynomial coefficients and the Doppler scaling factors must be
* reset to their saved values via the appropriate calls to realize_delcor
* and realize_dopscale, respectively.*/
if (newsize || newshape)
realize_mod_cuda(sdev_par, sdev_mod, type);
if (newspin) {
if (AF)
realize_spin_cuda_af(sdev_par, sdev_mod, sdev_dat, nsets);
else if (STREAMS)
realize_spin_cuda_streams(sdev_par, sdev_mod, sdev_dat, nsets);
else if (STREAMS2) {
if (FLOAT)
realize_spin_cuda_streams2f(sdev_par, sdev_mod, sdev_dat, htype, nframes,
nviews, nsets, bf_stream);
else
realize_spin_cuda_streams2(sdev_par, sdev_mod, sdev_dat, htype, nframes,
nviews, nsets, bf_stream);
}
else
realize_spin_cuda(sdev_par, sdev_mod, sdev_dat, nsets);
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 1); /* set R to R_save */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin)) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, 0.0, 1, nsets, htype, nframes); /* set delcor0 to delcor0_save */
else
realize_delcor_cuda(sdev_dat, 0.0, 1, nsets); /* set delcor0 to delcor0_save */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda_streams(sdev_par, sdev_dat, 1.0, 1, nsets, dtype); /* set dopscale to dopscale_save */
if (call_vary_params) {
/* Call vary_params to get the trial adjustments to 0th-order delay correc-
* tion polynomial coefficients, to Doppler scaling factors,and to radar
* and optical albedos, then send them to the branch nodes */
if (AF)
vary_params_af(sdev_par,sdev_mod,sdev_dat,spar->action,
&deldop_zmax,&rad_xsec,&opt_brightness,&cos_subradarlat,
nsets);
else if (STREAMS)
vary_params_cuda_streams2(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nsets);
else if (STREAMS2) {
if (FLOAT)
vary_params_cuda_streams3f(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream);
else
vary_params_cuda_streams3(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness, &cos_subradarlat,
nframes, lc_n, nviews, verts, htype, dtype, nf, nsets,
bf_stream); }
else
vary_params_cuda(sdev_par, sdev_mod, sdev_dat, spar->action,
&deldop_zmax, &rad_xsec, &opt_brightness,
&cos_subradarlat, nsets);
delta_delcor0 = (deldop_zmax - deldop_zmax_save)*KM2US;
if (cos_subradarlat != 0.0)
dopscale_factor = cos_subradarlat_save/cos_subradarlat;
if (rad_xsec != 0.0)
radalb_factor = rad_xsec_save/rad_xsec;
if (opt_brightness != 0.0)
optalb_factor = opt_brightness_save/opt_brightness;
}
if ((newsize && vary_alb_size) || ((newshape || newspin) && vary_alb_shapespin))
realize_photo_cuda(sdev_par, sdev_mod, radalb_factor, optalb_factor, 1); /* adjust R */
else if (newphoto)
realize_photo_cuda(sdev_par, sdev_mod, 1.0, 1.0, 0); /* set R_save to R */
if ((newsize && vary_delcor0_size) || ((newshape || newspin) && vary_delcor0_shapespin)) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, delta_delcor0, 1, nsets, htype, nframes); /* adjust delcor0 */
else
realize_delcor_cuda(sdev_dat, delta_delcor0, 1, nsets); /* adjust delcor0 */
}
else if (newdelcor) {
if (FLOAT)
realize_delcor_cuda_f(sdev_dat, 0.0, 0, nsets, htype, nframes); /* set delcor0_save to delcor0 */
else
realize_delcor_cuda(sdev_dat, 0.0, 0, nsets); /* set delcor0_save to delcor0 */
}
if ((newspin && vary_dopscale_spin) || ((newsize || newshape) && vary_dopscale_sizeshape))
realize_dopscale_cuda_streams(sdev_par, sdev_dat, dopscale_factor, 1, nsets, dtype); /* adjust dopscale */
else if (newdopscale)
realize_dopscale_cuda_streams(sdev_par, sdev_dat, 1.0, 0, nsets, dtype); /* set dopscale_save to dopscale */
if (newxyoff)
realize_xyoff_cuda_streams(sdev_dat, nsets, dtype);
if (AF) {
calc_fits_cuda_af(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_af(sdev_par, sdev_dat, 0, nsets);
}
else if (STREAMS) {
calc_fits_cuda_streams(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda_streams(sdev_par, sdev_dat, 0, nsets);
}
else if (STREAMS2) {
calc_fits_cuda_streams2(sdev_par, sdev_mod, sdev_dat, verts, nviews,
nframes, lc_n, htype, nsets, nf, bf_stream);
err = chi2_cuda_streams2(sdev_par, sdev_dat, htype, dtype, nframes,
lc_n, 0, nsets, bf_stream);
}
else {
calc_fits_cuda(sdev_par, sdev_mod, sdev_dat);
err = chi2_cuda(sdev_par, sdev_dat, 0);
}
/* Divide chi-square by DOF to get reduced chi-square. */
err /= sdat->dof;
/* If bestfit has set showvals = 1, display reduced chi-square. Then set
* spar->showstate = 1, so that when function penalties is called later,
* it "knows" that it should display the individual penalty values.
* Reset showstate to 0 if showvals = 0. */
if (showvals) {
printf("# %15s %e\n", "reduced chi2", err);
spar->showstate = 1;
}
else
spar->showstate = 0;
/* Compute penalties and add to reduced chi-square. Individual penalty values
* will be displayed if we set spar->showstate = 1 a few lines back. */
pens = penalties_cuda(sdev_par, sdev_mod, sdev_dat);
err += pens;
/* Double the objective function if there's an ellipsoid component with tiny
* or negative diameter, if any optical photometric parameters have invalid
* values, if any portion of the model lies outside specified POS window or
* outside any plane-of-sky fit image, or if model is too wide in delay-Dopp-
* ler space for any (delay-)Doppler fit image to be correctly constructed.
* This effectively rules out any models with any of these flaws. */
/* NOTE: TO-DO: baddiam may need to come from elsewhere other than spar.
* However, bestfit gets called only once and spar/smod/sdat gets copied
* only once.
* flags[0] = dpar->baddiam;
flags[1] = dpar->badphoto;
flags[2] = dpar->posbnd;
flags[3] = dpar->badposet;
flags[4] = dpar->badradar;
flags[5] = dpar->baddopscale;
dlogfactors[0] = dpar->bad_objfactor;
dlogfactors[1] = dpar->baddiam_logfactor;
dlogfactors[2] = dpar->badphoto_logfactor;
dlogfactors[3] = dpar->posbnd_logfactor;
dlogfactors[4] = dpar->badposet_logfactor;
dlogfactors[5] = dpar->badradar_logfactor;
dlogfactors[6] = dpar->baddopscale_logfactor;
*/
ocs_get_flags_krnl<<<1,1>>>(sdev_par, dflags, dlogfactors);
checkErrorAfterKernelLaunch("bf_get_flags_krnl");
gpuErrchk(cudaMemcpy(hflags, dflags, sizeof(unsigned char)*7,
cudaMemcpyDeviceToHost));
	gpuErrchk(cudaMemcpy(hlogfactors, dlogfactors, sizeof(double)*7,	/* copy all 7 log-factors, incl. baddopscale */
cudaMemcpyDeviceToHost));
if (hflags[0]) {
baddiam_factor = hlogfactors[0] * exp(hlogfactors[1]);
err *= baddiam_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal ellipsoid diameters\n",
baddiam_factor);
}
if (hflags[1]) {
badphoto_factor = hlogfactors[0] * exp(hlogfactors[2]);
err *= badphoto_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal photometric parameters\n",
badphoto_factor);
}
if (hflags[2]) {
check_posbnd = 1; /* tells bestfit about this problem */
posbnd_factor = hlogfactors[0] * exp(hlogfactors[3]);
err *= posbnd_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model extends beyond POS frame\n",
posbnd_factor);
}
if (hflags[3]) {
check_badposet = 1; /* tells bestfit about this problem */
badposet_factor = hlogfactors[0] * exp(hlogfactors[4]);
err *= badposet_factor;
if (showvals)
printf("# objective func multiplied by %.1f: plane-of-sky fit frame too small\n",
badposet_factor);
}
if (hflags[4]) {
check_badradar = 1; /* tells bestfit about this problem */
badradar_factor = hlogfactors[0] * exp(hlogfactors[5]);
err *= badradar_factor;
if (showvals)
printf("# objective func multiplied by %.1f: model too wide in delay-Doppler space\n",
badradar_factor);
}
if (hflags[5]) {
baddopscale_factor = hlogfactors[0] * exp(hlogfactors[6]);
err *= baddopscale_factor;
if (showvals)
printf("# objective func multiplied by %.1f: illegal Doppler scaling factors\n",
baddopscale_factor);
}
/* Reset showvals to 0 if it had been 1 (i.e., turn off display of reduced
* chi-square and the individual penalty values). */
if (showvals)
fflush( stdout);
showvals = 0;
free(hflags);
free(hlogfactors);
cudaFree(dflags);
cudaFree(dlogfactors);
return err;
}
|
859d57209323c8821d16a165893ab7ebfba25613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "globals.h"
#include "proto2.h"
;
extern State state;
#include <optix_device.h>
extern "C" __constant__ LaunchParams optixLaunchParams;
enum { SURFACE_RAY_TYPE = 0, RAY_TYPE_COUNT };
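/* The per-ray payload (a float3* in this program) travels through OptiX as two
   32-bit payload registers; unpack_pointer and pack_pointer convert between
   the 64-bit pointer and that (i0, i1) pair. */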
static __forceinline__ __device__ void *unpack_pointer(uint32_t i0,
uint32_t i1) {
const uint64_t uptr = (((static_cast<uint64_t>(i0)) << (32)) | (i1));
return reinterpret_cast<void *>(uptr);
}
static __forceinline__ __device__ void pack_pointer(void *ptr, uint32_t &i0,
uint32_t &i1) {
auto uptr = reinterpret_cast<uint64_t>(ptr);
i0 = (uptr) >> (32);
i1 = ((uptr) & (0xFFFFFFFF));
}
template <typename T> static __forceinline__ __device__ T *get_prd() {
auto u0 = optixGetPayload_0();
auto u1 = optixGetPayload_1();
return reinterpret_cast<T *>(unpack_pointer(u0, u1));
}
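/* random_color derives a deterministic pseudo-random RGB triple in [0,1] from
   an integer index (e.g. a primitive index), handy for visualizing
   per-triangle hits. */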
inline __device__ float3 random_color(int i) {
auto r = static_cast<int>(
((0x234235) + (((13) * (17) * (static_cast<unsigned>(i))))));
auto g = static_cast<int>(
((0x773477) + (((7) * (3) * (5) * (static_cast<unsigned>(i))))));
auto b = static_cast<int>(
((0x223766) + (((11) * (19) * (static_cast<unsigned>(i))))));
float3 res;
res.x = ((((r) & (255))) / ((2.55e+2f)));
res.y = ((((g) & (255))) / ((2.55e+2f)));
res.z = ((((b) & (255))) / ((2.55e+2f)));
return res;
}
extern "C" __global__ void __closesthit__radiance() {
auto id = optixGetPrimitiveIndex();
float3 *prd = get_prd<float3>();
auto c = random_color(id);
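  /* NOTE: c is computed but currently unused; the hit color below is
     hard-coded to green. */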
prd->x = (0.0e+0f);
prd->y = (1.e+0f);
prd->z = (0.0e+0f);
}
extern "C" __global__ void __anyhit__radiance() {}
extern "C" __global__ void __miss__radiance() {
float3 *prd = get_prd<float3>();
prd->x = (1.e-1f);
prd->y = (0.0e+0f);
prd->z = (0.0e+0f);
}
extern "C" __global__ void __exception__all() {
printf("optix exception: %d\n", optixGetExceptionCode());
}
extern "C" __global__ void __raygen__renderFrame() {
const int frameID = optixLaunchParams.frameID;
auto ix = optixGetLaunchIndex().x;
auto iy = optixGetLaunchIndex().y;
auto camera_position = optixLaunchParams.camera_position;
auto camera_direction = optixLaunchParams.camera_direction;
auto camera_horizontal = optixLaunchParams.camera_horizontal;
auto camera_vertical = optixLaunchParams.camera_vertical;
float3 pixel_color_prd;
auto u0 = uint32_t(0);
auto u1 = uint32_t(0);
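  /* Map the launch index to the pixel center in normalized [0,1]^2 screen
     coordinates, then build the primary ray direction from the camera basis
     vectors (camera_horizontal/camera_vertical span the image plane). */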
auto screen =
((glm::vec2(((.5f) + (ix)), ((.5f) + (iy)))) /
(glm::vec2(optixLaunchParams.fbSize_x, optixLaunchParams.fbSize_y)));
auto ray_dir =
glm::normalize(((camera_direction) +
(((camera_horizontal) * (((screen[0]) - ((5.e-1f)))))) +
(((camera_vertical) * (((screen[1]) - ((5.e-1f))))))));
auto fbIndex = ((ix) + (((iy) * (optixLaunchParams.fbSize_x))));
pack_pointer(&pixel_color_prd, u0, u1);
auto pos = reinterpret_cast<float3 *>(&camera_position);
auto dir = reinterpret_cast<float3 *>(&ray_dir);
optixTrace(optixLaunchParams.traversable, *pos, *dir, (0.0e+0f), (1.e+20f),
(0.0e+0f), OptixVisibilityMask(255), OPTIX_RAY_FLAG_NONE,
SURFACE_RAY_TYPE, RAY_TYPE_COUNT, SURFACE_RAY_TYPE, u0, u1);
auto r = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.x)));
auto g = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.y)));
auto b = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.z)));
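  /* 4278190080 == 0xFF000000: sets the alpha byte to 255 (fully opaque) before
     the red, green and blue bytes are packed into the lower 24 bits. */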
auto rgba = ((4278190080) | ((r) << (0)) | ((g) << (8)) | ((b) << (16)));
optixLaunchParams.colorBuffer[fbIndex] = rgba;
}; | 859d57209323c8821d16a165893ab7ebfba25613.cu |
#include "utils.h"
#include "globals.h"
#include "proto2.h"
;
extern State state;
#include <optix_device.h>
extern "C" __constant__ LaunchParams optixLaunchParams;
enum { SURFACE_RAY_TYPE = 0, RAY_TYPE_COUNT };
static __forceinline__ __device__ void *unpack_pointer(uint32_t i0,
uint32_t i1) {
const uint64_t uptr = (((static_cast<uint64_t>(i0)) << (32)) | (i1));
return reinterpret_cast<void *>(uptr);
}
static __forceinline__ __device__ void pack_pointer(void *ptr, uint32_t &i0,
uint32_t &i1) {
auto uptr = reinterpret_cast<uint64_t>(ptr);
i0 = (uptr) >> (32);
i1 = ((uptr) & (0xFFFFFFFF));
}
template <typename T> static __forceinline__ __device__ T *get_prd() {
auto u0 = optixGetPayload_0();
auto u1 = optixGetPayload_1();
return reinterpret_cast<T *>(unpack_pointer(u0, u1));
}
inline __device__ float3 random_color(int i) {
auto r = static_cast<int>(
((0x234235) + (((13) * (17) * (static_cast<unsigned>(i))))));
auto g = static_cast<int>(
((0x773477) + (((7) * (3) * (5) * (static_cast<unsigned>(i))))));
auto b = static_cast<int>(
((0x223766) + (((11) * (19) * (static_cast<unsigned>(i))))));
float3 res;
res.x = ((((r) & (255))) / ((2.55e+2f)));
res.y = ((((g) & (255))) / ((2.55e+2f)));
res.z = ((((b) & (255))) / ((2.55e+2f)));
return res;
}
extern "C" __global__ void __closesthit__radiance() {
auto id = optixGetPrimitiveIndex();
float3 *prd = get_prd<float3>();
auto c = random_color(id);
prd->x = (0.0e+0f);
prd->y = (1.e+0f);
prd->z = (0.0e+0f);
}
extern "C" __global__ void __anyhit__radiance() {}
extern "C" __global__ void __miss__radiance() {
float3 *prd = get_prd<float3>();
prd->x = (1.e-1f);
prd->y = (0.0e+0f);
prd->z = (0.0e+0f);
}
extern "C" __global__ void __exception__all() {
printf("optix exception: %d\n", optixGetExceptionCode());
}
extern "C" __global__ void __raygen__renderFrame() {
const int frameID = optixLaunchParams.frameID;
auto ix = optixGetLaunchIndex().x;
auto iy = optixGetLaunchIndex().y;
auto camera_position = optixLaunchParams.camera_position;
auto camera_direction = optixLaunchParams.camera_direction;
auto camera_horizontal = optixLaunchParams.camera_horizontal;
auto camera_vertical = optixLaunchParams.camera_vertical;
float3 pixel_color_prd;
auto u0 = uint32_t(0);
auto u1 = uint32_t(0);
auto screen =
((glm::vec2(((.5f) + (ix)), ((.5f) + (iy)))) /
(glm::vec2(optixLaunchParams.fbSize_x, optixLaunchParams.fbSize_y)));
auto ray_dir =
glm::normalize(((camera_direction) +
(((camera_horizontal) * (((screen[0]) - ((5.e-1f)))))) +
(((camera_vertical) * (((screen[1]) - ((5.e-1f))))))));
auto fbIndex = ((ix) + (((iy) * (optixLaunchParams.fbSize_x))));
pack_pointer(&pixel_color_prd, u0, u1);
auto pos = reinterpret_cast<float3 *>(&camera_position);
auto dir = reinterpret_cast<float3 *>(&ray_dir);
optixTrace(optixLaunchParams.traversable, *pos, *dir, (0.0e+0f), (1.e+20f),
(0.0e+0f), OptixVisibilityMask(255), OPTIX_RAY_FLAG_NONE,
SURFACE_RAY_TYPE, RAY_TYPE_COUNT, SURFACE_RAY_TYPE, u0, u1);
auto r = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.x)));
auto g = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.y)));
auto b = static_cast<int>((((2.5599e+2f)) * (pixel_color_prd.z)));
auto rgba = ((4278190080) | ((r) << (0)) | ((g) << (8)) | ((b) << (16)));
optixLaunchParams.colorBuffer[fbIndex] = rgba;
}; |
9e96332682db982a6f5a48579626841c210b7526.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "dali/kernels/test/kernel_poc_test.h"
namespace dali {
namespace kernels {
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
__global__ void
ElementwiseMAD(size_t n, Output *o, const Input1 *i1, const Input2 *i2, float alpha) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
o[idx] = i1[idx] * alpha + i2[idx];
}
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
struct MADKernelGPU {
KernelRequirements Setup(
KernelContext &context,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
KernelRequirements req;
req.output_shapes = { i1.shape };
return req;
}
void Run(
KernelContext &context,
const OutListGPU<Output, 3> &o,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
{
auto n = i1.num_elements();
assert(i2.num_elements() == n);
assert(o.num_elements() == n);
}
for (int i = 0; i < o.num_samples(); i++) {
auto tv1 = i1[i];
auto tv2 = i2[i];
auto tvo = o[i];
auto n = tv1.num_elements();
size_t block = 1024;
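      // Ceiling division: enough blocks that every element gets one thread.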
size_t grid = (n + block - 1) / block;
hipLaunchKernelGGL(( ElementwiseMAD), dim3(grid), dim3(block), 0, context.gpu.stream, n, tvo.data, tv1.data, tv2.data, A);
}
}
};
template <typename Kernel_>
class KernelPoC_GPU : public ::testing::Test, public KernelPoCFixture<StorageGPU, Kernel_> {
};
using PoC_MAD_GPU = ::testing::Types<
MADKernelGPU<float, float, float>,
MADKernelGPU<int, float, float>,
MADKernelGPU<float, int, float>,
MADKernelGPU<int, int, int>
>;
TYPED_TEST_SUITE(KernelPoC_GPU, PoC_MAD_GPU);
TYPED_TEST(KernelPoC_GPU, All) {
this->RunImpl();
}
} // namespace kernels
} // namespace dali
| 9e96332682db982a6f5a48579626841c210b7526.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include "dali/kernels/test/kernel_poc_test.h"
namespace dali {
namespace kernels {
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
__global__ void
ElementwiseMAD(size_t n, Output *o, const Input1 *i1, const Input2 *i2, float alpha) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
o[idx] = i1[idx] * alpha + i2[idx];
}
// Performs elementwise MAD (multiply-add).
template <typename Input1, typename Input2, typename Output>
struct MADKernelGPU {
KernelRequirements Setup(
KernelContext &context,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
KernelRequirements req;
req.output_shapes = { i1.shape };
return req;
}
void Run(
KernelContext &context,
const OutListGPU<Output, 3> &o,
const InListGPU<Input1, 3> &i1,
const InListGPU<Input2, 3> &i2,
float A) {
{
auto n = i1.num_elements();
assert(i2.num_elements() == n);
assert(o.num_elements() == n);
}
for (int i = 0; i < o.num_samples(); i++) {
auto tv1 = i1[i];
auto tv2 = i2[i];
auto tvo = o[i];
auto n = tv1.num_elements();
size_t block = 1024;
size_t grid = (n + block - 1) / block;
ElementwiseMAD<<<grid, block, 0, context.gpu.stream>>>(n, tvo.data, tv1.data, tv2.data, A);
}
}
};
template <typename Kernel_>
class KernelPoC_GPU : public ::testing::Test, public KernelPoCFixture<StorageGPU, Kernel_> {
};
using PoC_MAD_GPU = ::testing::Types<
MADKernelGPU<float, float, float>,
MADKernelGPU<int, float, float>,
MADKernelGPU<float, int, float>,
MADKernelGPU<int, int, int>
>;
TYPED_TEST_SUITE(KernelPoC_GPU, PoC_MAD_GPU);
TYPED_TEST(KernelPoC_GPU, All) {
this->RunImpl();
}
} // namespace kernels
} // namespace dali
|
4eb1ef9dd28d61f64cc51ec234040380d52d2731.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/chemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include "syhemv_mgpu_core.cuh"
#include "syhemv_mgpu_offset_core.cuh"
#include "defs.h"
#if(TARGET_SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (4)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#endif
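/* With either configuration above, elements_per_thread = bs / (2 * ty) = 4. */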
int kblas_chemv_mgpu_driver( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int ngpus, int gpu_gid,
hipStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
/**
* If you change the configuration parameters,
* you must revise the case statement of the upper case
* to make sure it covers all the possible cases
**/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_upper_by = 2*ngpus;
/** end configuration params **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(gpu_gid < (nstripes%ngpus) ) blocks += 1;
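		/* The nstripes block-rows are dealt round-robin across the GPUs; the
		 * first (nstripes % ngpus) GPUs each own one extra stripe. */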
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
//if (mod == 0) mod = chemv_bs;
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvu_mgpu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
hipLaunchKernelGGL(( syhemvu_mgpu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
}
else
{
hipLaunchKernelGGL(( syhemvu_mgpu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
// for the non-diagonal part choose between a templatized irregular part or a variable one
const int irregular_part = mod % elements_per_thread;
if(0)
{}
else
{ // Templatized irregular_part
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 1:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 2:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 3:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 4:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 5:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 6:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 7:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 8:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_lower_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvl_mgpu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
hipLaunchKernelGGL(( syhemvl_mgpu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
}
else
{
hipLaunchKernelGGL(( syhemvl_mgpu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
hipLaunchKernelGGL(( syhemvl_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
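/**
 * Offset-aware variant of kblas_chemv_mgpu_driver: it operates on the trailing part of the
 * matrix/vectors starting at row/column `offset`. Whole chemv_bs blocks before the offset are
 * skipped by advancing dA/dX/dY and shrinking m, the GPU ids are remapped so that the stripe
 * owning the first remaining block acts as GPU 0, and the remaining sub-block offset
 * (offset % chemv_bs) is passed to the *_offset kernels.
 **/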
int kblas_chemv_mgpu_driver_offset( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int ngpus, int gpu_gid,
int offset,
hipStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_upper_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvu_mgpu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
hipLaunchKernelGGL(( syhemvu_mgpu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
}
else
{
hipLaunchKernelGGL(( syhemvu_mgpu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
const int irregular_part = mod % elements_per_thread;
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 1:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 2:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 3:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 4:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 5:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 6:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 7:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 8:hipLaunchKernelGGL(( syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_lower_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvl_mgpu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
hipLaunchKernelGGL(( syhemvl_mgpu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
}
else
{
hipLaunchKernelGGL(( syhemvl_mgpu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
hipLaunchKernelGGL(( syhemvl_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
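/**
 * Synchronous multi-GPU entry point: for each GPU it selects the device, runs the single-GPU
 * driver (offset-aware when offset != 0) on that GPU's copy of the operands, and finally
 * synchronizes all devices before returning.
 **/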
extern "C"
int kblas_chemv_mgpu( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset)
{
const int ngpus_local = ngpus;
if(offset == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
hipDeviceSynchronize();
}
return 0;
}
/*************************************************************************************/
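/**
 * Asynchronous variant of kblas_chemv_mgpu: the same per-GPU dispatch, but each driver call is
 * issued on stream[i][0] and the function returns without synchronizing the devices.
 **/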
extern "C"
int kblas_chemv_mgpu_async( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset,
hipStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset, stream[i][0]);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_chemv_mgpu_bs(char uplo)
{
if(uplo == 'l' || uplo == 'L')
return chemv_lower_bs;
else if (uplo == 'u' || uplo == 'U')
return chemv_upper_bs;
else
{printf("Error .. input %c is not supported for hemv \n", uplo); return -1;}
}
| 4eb1ef9dd28d61f64cc51ec234040380d52d2731.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/chemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include "syhemv_mgpu_core.cuh"
#include "syhemv_mgpu_offset_core.cuh"
#include "defs.h"
#if(TARGET_SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (4)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#endif
int kblas_chemv_mgpu_driver( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int ngpus, int gpu_gid,
cudaStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
/**
* If you change the configuration parameters,
* you must revise the case statement of the upper case
* to make sure it covers all the possible cases
**/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_upper_by = 2*ngpus;
/** end configuration params **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
//if (mod == 0) mod = chemv_bs;
if(mod == 0)
{
syhemvu_mgpu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
syhemvu_mgpu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
}
else
{
syhemvu_mgpu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
// for the non-diagonal part choose between a templatized irregular part or a variable one
const int irregular_part = mod % elements_per_thread;
if(0)
{}
else
{ // Templatized irregular_part
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 1: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 2: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 3: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 4: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 5: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 6: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 7: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
case 8: syhemvu_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_lower_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(mod == 0)
{
syhemvl_mgpu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
syhemvl_mgpu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, nstripes);
}
else
{
syhemvl_mgpu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
syhemvl_mgpu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, gpu_gid, ngpus, nstripes);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
int kblas_chemv_mgpu_driver_offset( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
int ngpus, int gpu_gid,
int offset,
cudaStream_t stream = 0)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_upper_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(mod == 0)
{
syhemvu_mgpu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
syhemvu_mgpu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
}
else
{
syhemvu_mgpu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
const int irregular_part = mod % elements_per_thread;
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
switch(irregular_part)
{
case 0: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 1: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 2: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 3: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 4: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 5: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 6: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 7: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
case 8: syhemvu_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
const int chemv_lower_by = 2*ngpus; // design rule, feel free to change it
/** end configuration params **/
/** offset necessary calculation **/
int offset_ = offset % chemv_bs;
int total_blocks_skipped = offset / chemv_bs;
int my_skipped_blocks = total_blocks_skipped/ngpus;
if(gpu_gid < (total_blocks_skipped%ngpus)) my_skipped_blocks += 1;
int ref_gpu = total_blocks_skipped%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks * chemv_bs * lda;
dA += total_blocks_skipped * chemv_bs;
dX += total_blocks_skipped * chemv_bs * incx;
dY += total_blocks_skipped * chemv_bs * incy;
m -= total_blocks_skipped * chemv_bs;
/** end offset necessary calculation **/
int mod = m % chemv_bs;
int nstripes = m / chemv_bs + (mod != 0);
int blocks = nstripes/ngpus;
if(new_gpu_gid < (nstripes%ngpus) ) blocks += 1;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_lower_by);
if(mod == 0)
{
syhemvl_mgpu_special_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
syhemvl_mgpu_special_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_);
}
else
{
syhemvl_mgpu_generic_d_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
syhemvl_mgpu_generic_nd_offset<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod, new_gpu_gid, ngpus, nstripes, offset_);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_chemv_mgpu( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset)
{
const int ngpus_local = ngpus;
if(offset == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
cudaDeviceSynchronize();
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_chemv_mgpu_async( char uplo, int m,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset,
cudaStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_chemv_mgpu_driver_offset(uplo, m, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, ngpus, gpu_gid[i], offset, stream[i][0]);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_chemv_mgpu_bs(char uplo)
{
if(uplo == 'l' || uplo == 'L')
return chemv_lower_bs;
else if (uplo == 'u' || uplo == 'U')
return chemv_upper_bs;
else
{printf("Error .. input %c is not supported for hemv \n", uplo); return -1;}
}
|
c7b6aabd026a03822daf2eb02b4d33c041dcaf17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
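/*
   Illustrative host-side sketch (not part of the original assignment; the helper name
   reference_cdf and its types are our own) of the histogram + scan steps described above.
   For the example input it yields histo = [4 7 3]; the inclusive scan is [4 11 14] as shown,
   and subtracting each bin's own count gives the exclusive scan [0 4 11] required by step 4.

   #include <vector>
   #include <algorithm>

   static std::vector<unsigned> reference_cdf(const std::vector<float>& lum, int numBins)
   {
       float lo = *std::min_element(lum.begin(), lum.end());
       float hi = *std::max_element(lum.begin(), lum.end());
       float range = (hi > lo) ? (hi - lo) : 1.0f;        // guard the degenerate all-equal case
       std::vector<unsigned> histo(numBins, 0);
       for (float v : lum) {
           int bin = (int)((v - lo) / range * numBins);
           if (bin >= numBins) bin = numBins - 1;         // clamp the maximum value into the last bin
           histo[bin]++;
       }
       std::vector<unsigned> cdf(numBins, 0);             // exclusive prefix sum of the histogram
       for (int i = 1; i < numBins; i++)
           cdf[i] = cdf[i - 1] + histo[i - 1];
       return cdf;
   }
*/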
#include "utils.h"
#include <stdio.h>
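// Per-block shared-memory min reduction: each block loads its chunk of d_logLuminance,
// halves the number of active threads every step, and writes the block minimum to
// mins[blockIdx.x]. Note: because threads with id >= pixels return early, the last block
// reads uninitialized shared entries unless pixels is a multiple of blockDim.x.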
__global__ void minval(const float* const d_logLuminance, float *mins, unsigned pixels){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id>=pixels) return;
unsigned idl = threadIdx.x;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
shared[idl] = d_logLuminance[id];
__syncthreads();
i = blockDim.x>>1;
while(i){
if(idl<i)
shared[idl] = min(shared[idl],shared[idl+i]);
__syncthreads();
i=i>>1;
}
if(0==idl){
mins[blockIdx.x] = shared[0];
}
}
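// Same reduction pattern (and the same divisibility caveat) as minval, computing per-block maxima.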
__global__ void maxval(const float* const d_logLuminance, float *maxs, unsigned pixels){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id>=pixels) return;
unsigned idl = threadIdx.x;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
shared[idl] = d_logLuminance[id];
__syncthreads();
i = blockDim.x>>1;
while(i){
if(idl<i)
shared[idl] = max(shared[idl],shared[idl+i]);
__syncthreads();
i=i>>1;
}
if(0==idl){
maxs[blockIdx.x] = shared[0];
}
}
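// One partial histogram per block: block b owns histo[b*numBins .. (b+1)*numBins - 1] and
// atomically increments the bin (data[id] - min) / range * numBins for each pixel it covers.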
__global__ void createHisto(float min, float range, unsigned numBins,const float * const data, unsigned* histo){
int id = threadIdx.x + blockDim.x*blockIdx.x;
	int bin = (int)((data[id]-min)/range*numBins);
	if(bin >= (int)numBins) bin = (int)numBins - 1; // clamp: data[id] == max would otherwise index one bin past the end
	atomicAdd(histo+bin+blockIdx.x*numBins,1u);
}
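// Collapses the num_histo partial histograms: one block per bin and one thread per partial
// copy; a shared-memory tree sum leaves the total count for that bin in histos[blockIdx.x].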
__global__ void reduceHisto(unsigned *histos){
unsigned idl = threadIdx.x;
//gridDim.x = numBins, blockIdx.x = bin
//blockDim.x = num_histo, threadIdx.x = histo_id
extern __shared__ unsigned histoidl[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
histoidl[idl] = histos[idl*gridDim.x+blockIdx.x];
__syncthreads();
	i = blockDim.x>>1; // reduce over the blockDim.x (= num_histo) partial copies, not gridDim.x
while(i){
if(idl<i)
histoidl[idl] += histoidl[idl+i];
__syncthreads();
i=i>>1;
}
if(0==idl){
histos[blockIdx.x] = histoidl[0];
}
}
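// Inclusive Hillis-Steele scan of the reduced histogram, double-buffered in shared memory;
// launched with a single block and one thread per bin.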
__global__ void HillisSteeleScan(unsigned *data, unsigned *d_cdf){
	unsigned idl = threadIdx.x; // launched with a single block: gridDim.x = 1, blockDim.x = numBins
extern __shared__ unsigned datasegment[];
datasegment[idl] = data[idl];
__syncthreads();
for(int step=1;step<blockDim.x;step<<=1){
if(idl<step)
datasegment[idl+blockDim.x] = datasegment[idl];
else
datasegment[idl+blockDim.x] = datasegment[idl] + datasegment[idl-step];
__syncthreads();
datasegment[idl] = datasegment[idl+blockDim.x];
__syncthreads();
}
d_cdf[idl] = datasegment[idl+blockDim.x];
}
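// Converts the inclusive scan into an exclusive one by subtracting each bin's own count.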
__global__ void incluToExclusive(unsigned *data, unsigned *element){
int id = threadIdx.x + blockDim.x*blockIdx.x;
data[id] -= element[id];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1)DONE: find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2)DONE: subtract them to find the range
3)DONE: generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
	int pixels = numRows*numCols; printf("there are %9d pixels\n", pixels);
int threads = 1024;
int blocks = (pixels+threads-1)/threads; printf("will launch %9d blocks\n", blocks);
int num_histo = 1024;
float *mins;
float *maxs;
float *h_mins = new float[blocks];
float *h_maxs = new float[blocks];
checkCudaErrors(hipMalloc(&mins,sizeof(float)*blocks));
checkCudaErrors(hipMalloc(&maxs,sizeof(float)*blocks));
hipLaunchKernelGGL(( minval), dim3(blocks),dim3(threads), sizeof(float)*threads, 0, d_logLuminance,mins,pixels);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( maxval), dim3(blocks),dim3(threads), sizeof(float)*threads, 0, d_logLuminance,maxs,pixels);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(h_mins,mins,sizeof(float)*blocks,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_maxs,maxs,sizeof(float)*blocks,hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(mins));
checkCudaErrors(hipFree(maxs));
min_logLum = h_mins[0];
max_logLum = h_maxs[0];
for(int i=1;i<blocks;i++){
min_logLum = min(min_logLum,h_mins[i]);
max_logLum = max(max_logLum,h_maxs[i]);
}
	delete[] h_mins;
	delete[] h_maxs;
printf("min = %6.3f, max=%6.3f\n", min_logLum, max_logLum);
/*
//min and max are correct
float tempmin, tempmax;
float * h_logLuminance = new float[pixels];
checkCudaErrors(hipMemcpy(h_logLuminance, d_logLuminance, sizeof(float)*pixels, hipMemcpyDeviceToHost));
tempmin=h_logLuminance[0];
tempmax=tempmin;
for(int i=1;i<pixels;i++){
tempmin=min(tempmin,h_logLuminance[i]);
tempmax=max(tempmax,h_logLuminance[i]);
}
delete h_logLuminance;
printf("min should be %6.3f, max should be %6.3f\n", tempmin, tempmax);
*/
printf("In the range, there are %d bins\n",numBins);
unsigned *d_histo;
checkCudaErrors(hipMalloc(&d_histo,sizeof(unsigned)*numBins*num_histo));
checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned)*numBins*num_histo));
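	// NOTE: this launch uses pixels/num_histo threads per block, so it assumes pixels is a
	// multiple of num_histo and that the quotient stays within the device's thread-per-block limit.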
hipLaunchKernelGGL(( createHisto), dim3(num_histo), dim3(pixels/num_histo), 0, 0,
min_logLum,max_logLum-min_logLum,numBins, d_logLuminance,d_histo);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//unsigned *parthisto = new unsigned[numBins];
printf("Histos created!\n");
/*
checkCudaErrors(hipMemcpy(parthisto, d_histo, sizeof(unsigned)*numBins, hipMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%3d",parthisto[i]);
if(31==i%32) printf("\n");
}
checkCudaErrors(hipMemcpy(parthisto, d_histo+numBins, sizeof(unsigned)*numBins, hipMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%3d",parthisto[i]);
if(31==i%32) printf("\n");
}
*/
printf("Now begin recuding .... \n");
hipLaunchKernelGGL(( reduceHisto), dim3(numBins), dim3(num_histo), sizeof(unsigned)*num_histo, 0, d_histo);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
/*
checkCudaErrors(hipMemcpy(parthisto, d_histo, sizeof(unsigned)*numBins, hipMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%5d",parthisto[i]);
if(15==i%16) printf("\n");
}
*/
printf("Reduced done! Now begin scanning ...\n");
hipLaunchKernelGGL(( HillisSteeleScan), dim3(1), dim3(numBins), sizeof(unsigned)*numBins*2, 0, d_histo,d_cdf);
//Algorithm only allows one block, otherwise kernel give segments scanned but not totally scanned
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( incluToExclusive), dim3(1), dim3(numBins), 0, 0, d_cdf,d_histo);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_histo));
printf("Scanning done! Now inclusive to exclusive ... \n");
/*
checkCudaErrors(hipMemcpy(parthisto, d_cdf, sizeof(unsigned)*numBins, hipMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%6d",parthisto[i]);
if(15==i%16) printf("\n");
}
delete parthisto;
*/
}
| c7b6aabd026a03822daf2eb02b4d33c041dcaf17.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <stdio.h>
__global__ void minval(const float* const d_logLuminance, float *mins, unsigned pixels){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id>=pixels) return;
unsigned idl = threadIdx.x;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
shared[idl] = d_logLuminance[id];
__syncthreads();
i = blockDim.x>>1;
while(i){
if(idl<i)
shared[idl] = min(shared[idl],shared[idl+i]);
__syncthreads();
i=i>>1;
}
if(0==idl){
mins[blockIdx.x] = shared[0];
}
}
__global__ void maxval(const float* const d_logLuminance, float *maxs, unsigned pixels){
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id>=pixels) return;
unsigned idl = threadIdx.x;
extern __shared__ float shared[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
shared[idl] = d_logLuminance[id];
__syncthreads();
i = blockDim.x>>1;
while(i){
if(idl<i)
shared[idl] = max(shared[idl],shared[idl+i]);
__syncthreads();
i=i>>1;
}
if(0==idl){
maxs[blockIdx.x] = shared[0];
}
}
__global__ void createHisto(float min, float range, unsigned numBins,const float * const data, unsigned* histo){
int id = threadIdx.x + blockDim.x*blockIdx.x;
	int bin = (int)((data[id]-min)/range*numBins);
	if(bin >= (int)numBins) bin = (int)numBins - 1; // clamp: data[id] == max would otherwise index one bin past the end
	atomicAdd(histo+bin+blockIdx.x*numBins,1u);
}
__global__ void reduceHisto(unsigned *histos){
unsigned idl = threadIdx.x;
//gridDim.x = numBins, blockIdx.x = bin
//blockDim.x = num_histo, threadIdx.x = histo_id
extern __shared__ unsigned histoidl[];
//size of shared[] is given as 3rd parameter while launching the kernel
int i;
histoidl[idl] = histos[idl*gridDim.x+blockIdx.x];
__syncthreads();
	i = blockDim.x>>1; // reduce over the blockDim.x (= num_histo) partial copies, not gridDim.x
while(i){
if(idl<i)
histoidl[idl] += histoidl[idl+i];
__syncthreads();
i=i>>1;
}
if(0==idl){
histos[blockIdx.x] = histoidl[0];
}
}
__global__ void HillisSteeleScan(unsigned *data, unsigned *d_cdf){
	unsigned idl = threadIdx.x; // launched with a single block: gridDim.x = 1, blockDim.x = numBins
extern __shared__ unsigned datasegment[];
datasegment[idl] = data[idl];
__syncthreads();
for(int step=1;step<blockDim.x;step<<=1){
if(idl<step)
datasegment[idl+blockDim.x] = datasegment[idl];
else
datasegment[idl+blockDim.x] = datasegment[idl] + datasegment[idl-step];
__syncthreads();
datasegment[idl] = datasegment[idl+blockDim.x];
__syncthreads();
}
d_cdf[idl] = datasegment[idl+blockDim.x];
}
__global__ void incluToExclusive(unsigned *data, unsigned *element){
int id = threadIdx.x + blockDim.x*blockIdx.x;
data[id] -= element[id];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1)DONE: find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2)DONE: subtract them to find the range
3)DONE: generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
	int pixels = numRows*numCols; printf("there are %9d pixels\n", pixels);
int threads = 1024;
int blocks = (pixels+threads-1)/threads; printf("will launch %9d blocks\n", blocks);
int num_histo = 1024;
float *mins;
float *maxs;
float *h_mins = new float[blocks];
float *h_maxs = new float[blocks];
checkCudaErrors(cudaMalloc(&mins,sizeof(float)*blocks));
checkCudaErrors(cudaMalloc(&maxs,sizeof(float)*blocks));
minval<<<blocks,threads, sizeof(float)*threads>>>(d_logLuminance,mins,pixels);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
maxval<<<blocks,threads, sizeof(float)*threads>>>(d_logLuminance,maxs,pixels);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(h_mins,mins,sizeof(float)*blocks,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_maxs,maxs,sizeof(float)*blocks,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(mins));
checkCudaErrors(cudaFree(maxs));
min_logLum = h_mins[0];
max_logLum = h_maxs[0];
for(int i=1;i<blocks;i++){
min_logLum = min(min_logLum,h_mins[i]);
max_logLum = max(max_logLum,h_maxs[i]);
}
	delete[] h_mins;
	delete[] h_maxs;
printf("min = %6.3f, max=%6.3f\n", min_logLum, max_logLum);
/*
//min and max are correct
float tempmin, tempmax;
float * h_logLuminance = new float[pixels];
checkCudaErrors(cudaMemcpy(h_logLuminance, d_logLuminance, sizeof(float)*pixels, cudaMemcpyDeviceToHost));
tempmin=h_logLuminance[0];
tempmax=tempmin;
for(int i=1;i<pixels;i++){
tempmin=min(tempmin,h_logLuminance[i]);
tempmax=max(tempmax,h_logLuminance[i]);
}
delete h_logLuminance;
printf("min should be %6.3f, max should be %6.3f\n", tempmin, tempmax);
*/
printf("In the range, there are %d bins\n",numBins);
unsigned *d_histo;
checkCudaErrors(cudaMalloc(&d_histo,sizeof(unsigned)*numBins*num_histo));
checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned)*numBins*num_histo));
createHisto<<<num_histo, pixels/num_histo>>>(
min_logLum,max_logLum-min_logLum,numBins, d_logLuminance,d_histo);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//unsigned *parthisto = new unsigned[numBins];
printf("Histos created!\n");
/*
checkCudaErrors(cudaMemcpy(parthisto, d_histo, sizeof(unsigned)*numBins, cudaMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%3d",parthisto[i]);
if(31==i%32) printf("\n");
}
checkCudaErrors(cudaMemcpy(parthisto, d_histo+numBins, sizeof(unsigned)*numBins, cudaMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%3d",parthisto[i]);
if(31==i%32) printf("\n");
}
*/
printf("Now begin recuding .... \n");
reduceHisto<<<numBins, num_histo, sizeof(unsigned)*num_histo>>>(d_histo);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
/*
checkCudaErrors(cudaMemcpy(parthisto, d_histo, sizeof(unsigned)*numBins, cudaMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%5d",parthisto[i]);
if(15==i%16) printf("\n");
}
*/
printf("Reduced done! Now begin scanning ...\n");
HillisSteeleScan<<<1, numBins, sizeof(unsigned)*numBins*2>>>(d_histo,d_cdf);
//Algorithm only allows one block, otherwise kernel give segments scanned but not totally scanned
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
incluToExclusive<<<1, numBins>>>(d_cdf,d_histo);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_histo));
printf("Scanning done! Now inclusive to exclusive ... \n");
/*
checkCudaErrors(cudaMemcpy(parthisto, d_cdf, sizeof(unsigned)*numBins, cudaMemcpyDeviceToHost));
for(int i=0;i<numBins;i++){
printf("%6d",parthisto[i]);
if(15==i%16) printf("\n");
}
delete parthisto;
*/
}
|
e55ce1c9f56389d9810f077103a0bf76be578ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _ZQ_CUDA_POISSON_SOLVER_3D_CU_
#define _ZQ_CUDA_POISSON_SOLVER_3D_CU_
#include "ZQ_CUDA_PoissonSolver3D.cuh"
#include "ZQ_CUDA_ImageProcessing3D.cuh"
namespace ZQ_CUDA_PoissonSolver3D
{
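	/*
	   Grid conventions used throughout this file (inferred from the indexing below):
	   - "regular" velocities u, v, w are cell-centered arrays of size width x height x depth;
	   - MAC (staggered) velocities live on cell faces:
	       mac_u : (width+1) x height x depth   (x-faces)
	       mac_v : width x (height+1) x depth   (y-faces)
	       mac_w : width x height x (depth+1)   (z-faces)
	   - pressure p and the divergence are cell-centered.
	   The Regular_to_MAC_* kernels average the two adjacent cell centers onto each interior
	   face and copy the nearest cell value onto boundary faces; MAC_to_Regular_vel averages
	   the two faces of each cell back to its center. Each thread processes one (x,y) column.
	*/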
__global__
void Regular_to_MAC_u_Kernel(float* mac_u, const float* u, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
mac_u[z*height*(width+1)+y*(width+1)+x] = u[z*height*width+y*width+x];
else if(x == width)
mac_u[z*height*(width+1)+y*(width+1)+x] = u[z*height*width+y*width+x-1];
else
mac_u[z*height*(width+1)+y*(width+1)+x] = 0.5f*(u[z*height*width+y*width+x-1]+u[z*height*width+y*width+x]);
}
}
__global__
void Regular_to_MAC_v_Kernel(float* mac_v, const float* v, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
mac_v[z*(height+1)*width+y*width+x] = v[z*height*width+y*width+x];
else if(y == height)
mac_v[z*(height+1)*width+y*width+x] = v[z*height*width+(y-1)*width+x];
else
mac_v[z*(height+1)*width+y*width+x] = 0.5f*(v[z*height*width+(y-1)*width+x]+v[z*height*width+y*width+x]);
}
}
__global__
void Regular_to_MAC_w_Kernel(float* mac_w, const float* w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
		if(x >= width || y >= height)
			return ;
		mac_w[y*width+x] = w[y*width+x];
		mac_w[depth*height*width+y*width+x] = w[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
mac_w[z*height*width+y*width+x] = 0.5f*(w[(z-1)*height*width+y*width+x]+w[z*height*width+y*width+x]);
}
}
__global__
void MAC_to_Regular_vel_Kernel(float* u, float* v, float* w, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
u[z*height*width+y*width+x] = 0.5f*(mac_u[z*height*(width+1)+y*(width+1)+x]+mac_u[z*height*(width+1)+y*(width+1)+x+1]);
v[z*height*width+y*width+x] = 0.5f*(mac_v[z*(height+1)*width+y*width+x]+mac_v[z*(height+1)*width+(y+1)*width+x]);
w[z*height*width+y*width+x] = 0.5f*(mac_w[z*height*width+y*width+x]+mac_w[(z+1)*height*width+y*width+x]);
}
}
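	// Per-cell divergence of the MAC velocity field: (outgoing - incoming) face velocities
	// summed over the x, y and z faces of each cell.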
__global__
void Calculate_Divergence_of_MAC_Kernel(float* divergence, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
divergence[z*height*width+y*width+x] = mac_u[z*height*(width+1)+y*(width+1)+x+1] - mac_u[z*height*(width+1)+y*(width+1)+x]
+ mac_v[z*(height+1)*width+(y+1)*width+x] - mac_v[z*(height+1)*width+y*width+x]
+ mac_w[(z+1)*height*width+y*width+x] - mac_w[z*height*width+y*width+x];
}
}
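	// Same divergence, but every face velocity is weighted by its unoccupied face ratio
	// (unoccupyU/V/W), so partially blocked faces contribute proportionally less flux.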
__global__
void Calculate_Divergence_of_MAC_FaceRatio_Kernel(float* divergence, const float* mac_u, const float* mac_v, const float* mac_w,
const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
divergence[z*height*width+y*width+x] =
mac_u[z*height*(width+1)+y*(width+1)+x+1]*unoccupyU[z*height*(width+1)+y*(width+1)+x+1]
- mac_u[z*height*(width+1)+y*(width+1)+x]*unoccupyU[z*height*(width+1)+y*(width+1)+x]
+ mac_v[z*(height+1)*width+(y+1)*width+x]*unoccupyV[z*(height+1)*width+(y+1)*width+x]
- mac_v[z*(height+1)*width+y*width+x]*unoccupyV[z*(height+1)*width+y*width+x]
+ mac_w[(z+1)*height*width+y*width+x]*unoccupyW[(z+1)*height*width+y*width+x]
- mac_w[z*height*width+y*width+x]*unoccupyW[z*height*width+y*width+x];
}
}
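	// Adjust_MAC_{u,v,w}_OpenPoisson: the projection step. The pressure difference across each
	// face is subtracted from the face velocity; with the open boundary, the pressure outside
	// the domain is taken to be 0.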
__global__
void Adjust_MAC_u_OpenPoisson_Kernel(float* mac_u, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
else if(x == width)
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
else
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
__global__
void Adjust_MAC_v_OpenPoisson_Kernel(float* mac_v, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
else if(y == height)
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
else
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
__global__
void Adjust_MAC_w_OpenPoisson_Kernel(float* mac_w, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
mac_w[y*width+x] -= p[y*width+x] - 0;
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
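	// _occupy variants: the same projection, but faces adjacent to an occupied (solid) cell are
	// left unchanged so that no velocity is pushed into obstacles.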
__global__
void Adjust_MAC_u_OpenPoisson_occupy_Kernel(float* mac_u, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(!occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
}
else if(x == width)
{
if(!occupy[z*height*width+y*width+x-1])
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
}
else
{
if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
}
__global__
void Adjust_MAC_v_OpenPoisson_occupy_Kernel(float* mac_v, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(!occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
}
else if(y == height)
{
if(!occupy[z*height*width+(y-1)*width+x])
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
}
else
{
if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
}
__global__
void Adjust_MAC_w_OpenPoisson_occupy_Kernel(float* mac_w, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
if(!occupy[y*width+x])
mac_w[y*width+x] -= p[y*width+x] - 0;
if(!occupy[(depth-1)*height*width+y*width+x])
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x])
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
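	// _FaceRatio variants: the pressure gradient is applied only where the face is at least
	// partially open (unoccupy ratio != 0).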
__global__
void Adjust_MAC_u_OpenPoisson_FaceRatio_Kernel(float* mac_u, const float* p, const float* unoccupyU, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
}
else if(x == width)
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
}
else
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
}
__global__
void Adjust_MAC_v_OpenPoisson_FaceRatio_Kernel(float* mac_v, const float* p, const float* unoccupyV, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
}
else if(y == height)
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
}
else
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
}
__global__
void Adjust_MAC_w_OpenPoisson_FaceRatio_Kernel(float* mac_w, const float* p, const float* unoccupyW, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
if(unoccupyW[y*width+x] != 0)
mac_w[y*width+x] -= p[y*width+x] - 0;
if(unoccupyW[depth*height*width+y*width+x] != 0)
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
if(unoccupyW[z*height*width+y*width+x] != 0)
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
/*First Implementation*/
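/* Red-black Gauss-Seidel sweep for the open-boundary pressure Poisson equation.
   Cells are coloured by the parity of (x+y+z); the host launches this kernel twice
   per iteration (redkernel = true, then false) so each update reads only neighbours
   of the opposite colour. Pressure outside the domain is taken as 0, which is why
   the diagonal coefficient stays 6 at domain boundaries. */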
__global__
void SolvePressure_OpenPoisson_RedBlack_Kernel(float* p, const float* divergence, const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start; z < depth;z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
coeff = 6;
if(z == 0)
{
sigma += p[offset+height*width];
}
else if(z == depth-1)
{
sigma += p[offset-height*width];
}
else
{
sigma += p[offset-height*width]+p[offset+height*width];
}
if(y == 0)
{
sigma += p[offset+width];
}
else if(y == height-1)
{
sigma += p[offset-width];
}
else
{
sigma += p[offset-width]+p[offset+width];
}
if(x == 0)
{
sigma += p[offset+1];
}
else if(x == width-1)
{
sigma += p[offset-1];
}
else
{
sigma += p[offset+1]+p[offset-1];
}
sigma -= divergence[offset];
p[offset] = sigma/coeff;
}
}
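/* Variant with solid cells: occupied cells keep p = 0, and for each solid neighbour
   the diagonal coefficient is reduced by one, which amounts to a zero-gradient
   (Neumann) condition across solid faces. */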
__global__
void SolvePressure_OpenPoisson_occupy_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start;z < depth;z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
if(occupy[offset])
{
p[offset] = 0;
continue ;
}
coeff = 6;
if(z == 0)
{
if(!occupy[offset+height*width])
sigma += p[offset+height*width];
else
coeff -= 1;
}
else if(z == depth-1)
{
if(!occupy[offset-height*width])
sigma += p[offset-height*width];
else
coeff -= 1;
}
else
{
if(!occupy[offset+height*width])
sigma += p[offset+height*width];
else
coeff -= 1;
if(!occupy[offset-height*width])
sigma += p[offset-height*width];
else
coeff -= 1;
}
if(y == 0)
{
if(!occupy[offset+width])
sigma += p[offset+width];
else
coeff -= 1;
}
else if(y == height-1)
{
if(!occupy[offset-width])
sigma += p[offset-width];
else
coeff -= 1;
}
else
{
if(!occupy[offset+width])
sigma += p[offset+width];
else
coeff -= 1;
if(!occupy[offset-width])
sigma += p[offset-width];
else
coeff -= 1;
}
if(x == 0)
{
if(!occupy[offset+1])
sigma += p[offset+1];
else
coeff -= 1;
}
else if(x == width-1)
{
if(!occupy[offset-1])
sigma += p[offset-1];
else
coeff -= 1;
}
else
{
if(!occupy[offset+1])
sigma += p[offset+1];
else
coeff -= 1;
if(!occupy[offset-1])
sigma += p[offset-1];
else
coeff -= 1;
}
sigma -= divergence[offset];
if(coeff > 0)
p[offset] = sigma/coeff;
else
p[offset] = 0;
}
}
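/* Variant with fractional faces: each neighbour contribution is weighted by the
   open-area ratio of the shared face, and the diagonal coefficient is the sum of
   those ratios (plus 1 at an open domain boundary, where the outside pressure is 0). */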
__global__
void SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start;z < depth; z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
if(occupy[offset])
{
p[offset] = 0;
continue ;
}
if(z == 0)
{
float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x];
sigma += cur_ratio*p[offset+height*width];
coeff += cur_ratio;
coeff += 1;
}
else if(z == depth-1)
{
float cur_ratio = unoccupyW[z*height*width+y*width+x];
sigma += cur_ratio*p[offset-height*width];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x];
sigma += cur_ratio*p[offset+height*width];
coeff += cur_ratio;
cur_ratio = unoccupyW[z*height*width+y*width+x];
sigma += cur_ratio*p[offset-height*width];
coeff += cur_ratio;
}
if(y == 0)
{
float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x];
sigma += cur_ratio*p[offset+width];
coeff += cur_ratio;
coeff += 1;
}
else if(y == height-1)
{
float cur_ratio = unoccupyV[z*(height+1)*width+y*width+x];
sigma += cur_ratio*p[offset-width];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x];
sigma += cur_ratio*p[offset+width];
coeff += cur_ratio;
cur_ratio = unoccupyV[z*(height+1)*width+y*width+x];
sigma += cur_ratio*p[offset-width];
coeff += cur_ratio;
}
if(x == 0)
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1];
sigma += cur_ratio*p[offset+1];
coeff += cur_ratio;
coeff += 1;
}
else if(x == width-1)
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x];
sigma += cur_ratio*p[offset-1];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1];
sigma += cur_ratio*p[offset+1];
coeff += cur_ratio;
cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x];
sigma += cur_ratio*p[offset-1];
coeff += cur_ratio;
}
sigma -= divergence[offset];
if(coeff > 0)
p[offset] = sigma/coeff;
else
p[offset] = 0;
}
}
/**************************************************************/
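/* Host-side helpers: the cu_* functions below work on device pointers. They convert
   between collocated and MAC grids, build the divergence of the MAC velocity, run
   maxIter red/black sweep pairs of the chosen pressure kernel, and apply the
   resulting pressure correction to the MAC velocities. */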
void cu_Regular_to_MAC_vel(float* mac_u, float* mac_v, float* mac_w, const float* u, const float* v, const float* w, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
hipLaunchKernelGGL(( Regular_to_MAC_u_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,u,width,height,depth);
hipLaunchKernelGGL(( Regular_to_MAC_v_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,v,width,height,depth);
hipLaunchKernelGGL(( Regular_to_MAC_w_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,w,width,height,depth);
}
void cu_MAC_to_Regular_vel(float* u, float* v, float* w, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
hipLaunchKernelGGL(( MAC_to_Regular_vel_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,v,w,mac_u,mac_v,mac_w,width,height,depth);
}
/*First Implementation*/
void cu_SolveOpenPoissonRedBlack_MAC(float* mac_u, float* mac_v, float* mac_w, const int width, const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth));
hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,width,height,depth,true);
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,width,height,depth,false);
}
hipLaunchKernelGGL(( Adjust_MAC_u_OpenPoisson_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_v_OpenPoisson_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_w_OpenPoisson_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,width,height,depth);
checkCudaErrors( hipFree(b_d) );
checkCudaErrors( hipFree(p_d) );
b_d = 0;
p_d = 0;
}
void cu_SolveOpenPoissonRedBlack_Regular(float* u, float* v, float* w, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u = 0;
float* mac_v = 0;
float* mac_w = 0;
checkCudaErrors( hipMalloc((void**)&mac_u,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&mac_v,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&mac_w,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMemset(mac_u,0,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMemset(mac_v,0,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMemset(mac_w,0,sizeof(float)*width*height*(depth+1)) );
cu_Regular_to_MAC_vel(mac_u,mac_v,mac_w,u,v,w,width,height,depth);
cu_SolveOpenPoissonRedBlack_MAC(mac_u,mac_v,mac_w,width,height,depth,maxIter);
cu_MAC_to_Regular_vel(u,v,w,mac_u,mac_v,mac_w,width,height,depth);
checkCudaErrors( hipFree(mac_u) );
checkCudaErrors( hipFree(mac_v) );
checkCudaErrors( hipFree(mac_w) );
mac_u = 0;
mac_v = 0;
mac_w = 0;
}
void cu_SolveOpenPoissonRedBlackwithOccupy_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const int width, const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth));
hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_occupy_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,width,height,depth,true);
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_occupy_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,width,height,depth,false);
}
hipLaunchKernelGGL(( Adjust_MAC_u_OpenPoisson_occupy_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,occupy,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_v_OpenPoisson_occupy_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,occupy,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_w_OpenPoisson_occupy_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,occupy,width,height,depth);
checkCudaErrors( hipFree(b_d) );
checkCudaErrors( hipFree(p_d) );
b_d = 0;
p_d = 0;
}
void cu_SolveOpenPoissonRedBlackwithFaceRatio_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width ,const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( hipMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( hipMemset(p_d,0,sizeof(float)*width*height*depth));
hipLaunchKernelGGL(( Calculate_Divergence_of_MAC_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,unoccupyU,unoccupyV,unoccupyW,width,height,depth,true);
hipLaunchKernelGGL(( SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, p_d,b_d,occupy,unoccupyU,unoccupyV,unoccupyW,width,height,depth,false);
}
hipLaunchKernelGGL(( Adjust_MAC_u_OpenPoisson_FaceRatio_Kernel), dim3(u_gridSize),dim3(blockSize), 0, 0, mac_u,p_d,unoccupyU,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_v_OpenPoisson_FaceRatio_Kernel), dim3(v_gridSize),dim3(blockSize), 0, 0, mac_v,p_d,unoccupyV,width,height,depth);
hipLaunchKernelGGL(( Adjust_MAC_w_OpenPoisson_FaceRatio_Kernel), dim3(w_gridSize),dim3(blockSize), 0, 0, mac_w,p_d,unoccupyW,width,height,depth);
checkCudaErrors( hipFree(b_d) );
checkCudaErrors( hipFree(p_d) );
b_d = 0;
p_d = 0;
}
/*****************************************************************************/
/*First Implementation*/
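/* extern "C" entry points: these take host arrays, allocate and copy the data to the
   GPU, call the corresponding cu_* solver, then copy the corrected velocities back
   and free the device buffers. */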
extern "C"
void SolveOpenPoissonRedBlack3D_MAC(float* mac_u, float* mac_v, float* mac_w, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlack_MAC(mac_u_d,mac_v_d,mac_w_d,width,height,depth,maxIter);
checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree(mac_u_d) );
checkCudaErrors( hipFree(mac_v_d) );
checkCudaErrors( hipFree(mac_w_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlack3D_Regular(float* u, float* v, float* w, const int width, const int height, const int depth, const int maxIter)
{
float* u_d = 0;
float* v_d = 0;
float* w_d = 0;
checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height*depth) );
checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height*depth) );
checkCudaErrors( hipMalloc((void**)&w_d,sizeof(float)*width*height*depth) );
checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(w_d,w,sizeof(float)*width*height*depth,hipMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlack_Regular(u_d,v_d,w_d,width,height,depth,maxIter);
checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(w,w_d,sizeof(float)*width*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree(u_d) );
checkCudaErrors( hipFree(v_d) );
checkCudaErrors( hipFree(w_d) );
u_d = 0;
v_d = 0;
w_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlackwithOccupy3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
bool* occupy_d = 0;
checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,hipMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlackwithOccupy_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,width,height,depth,maxIter);
checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree(mac_u_d) );
checkCudaErrors( hipFree(mac_v_d) );
checkCudaErrors( hipFree(mac_w_d) );
checkCudaErrors( hipFree(occupy_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
occupy_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlackwithFaceRatio3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
bool* occupy_d = 0;
float* unoccupyU_d = 0;
float* unoccupyV_d = 0;
float* unoccupyW_d = 0;
checkCudaErrors( hipMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( hipMalloc((void**)&unoccupyU_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( hipMalloc((void**)&unoccupyV_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( hipMalloc((void**)&unoccupyW_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( hipMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(unoccupyU_d,unoccupyU,sizeof(float)*(width+1)*height*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(unoccupyV_d,unoccupyV,sizeof(float)*width*(height+1)*depth,hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(unoccupyW_d,unoccupyW,sizeof(float)*width*height*(depth+1),hipMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlackwithFaceRatio_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,unoccupyU_d,unoccupyV_d,unoccupyW_d,width,height,depth,maxIter);
checkCudaErrors( hipMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree(mac_u_d) );
checkCudaErrors( hipFree(mac_v_d) );
checkCudaErrors( hipFree(mac_w_d) );
checkCudaErrors( hipFree(occupy_d) );
checkCudaErrors( hipFree(unoccupyU_d) );
checkCudaErrors( hipFree(unoccupyV_d) );
checkCudaErrors( hipFree(unoccupyW_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
occupy_d = 0;
unoccupyU_d = 0;
unoccupyV_d = 0;
unoccupyW_d = 0;
}
}
#endif | e55ce1c9f56389d9810f077103a0bf76be578ce0.cu | #ifndef _ZQ_CUDA_POISSON_SOLVER_3D_CU_
#define _ZQ_CUDA_POISSON_SOLVER_3D_CU_
#include "ZQ_CUDA_PoissonSolver3D.cuh"
#include "ZQ_CUDA_ImageProcessing3D.cuh"
namespace ZQ_CUDA_PoissonSolver3D
{
__global__
void Regular_to_MAC_u_Kernel(float* mac_u, const float* u, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
mac_u[z*height*(width+1)+y*(width+1)+x] = u[z*height*width+y*width+x];
else if(x == width)
mac_u[z*height*(width+1)+y*(width+1)+x] = u[z*height*width+y*width+x-1];
else
mac_u[z*height*(width+1)+y*(width+1)+x] = 0.5f*(u[z*height*width+y*width+x-1]+u[z*height*width+y*width+x]);
}
}
__global__
void Regular_to_MAC_v_Kernel(float* mac_v, const float* v, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
mac_v[z*(height+1)*width+y*width+x] = v[z*height*width+y*width+x];
else if(y == height)
mac_v[z*(height+1)*width+y*width+x] = v[z*height*width+(y-1)*width+x];
else
mac_v[z*(height+1)*width+y*width+x] = 0.5f*(v[z*height*width+(y-1)*width+x]+v[z*height*width+y*width+x]);
}
}
__global__
void Regular_to_MAC_w_Kernel(float* mac_w, const float* w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
mac_w[y*width+x] = w[y*width+x];
mac_w[depth*height*width+y*width+x] = w[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
mac_w[z*height*width+y*width+x] = 0.5f*(w[(z-1)*height*width+y*width+x]+w[z*height*width+y*width+x]);
}
}
__global__
void MAC_to_Regular_vel_Kernel(float* u, float* v, float* w, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
u[z*height*width+y*width+x] = 0.5f*(mac_u[z*height*(width+1)+y*(width+1)+x]+mac_u[z*height*(width+1)+y*(width+1)+x+1]);
v[z*height*width+y*width+x] = 0.5f*(mac_v[z*(height+1)*width+y*width+x]+mac_v[z*(height+1)*width+(y+1)*width+x]);
w[z*height*width+y*width+x] = 0.5f*(mac_w[z*height*width+y*width+x]+mac_w[(z+1)*height*width+y*width+x]);
}
}
__global__
void Calculate_Divergence_of_MAC_Kernel(float* divergence, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
divergence[z*height*width+y*width+x] = mac_u[z*height*(width+1)+y*(width+1)+x+1] - mac_u[z*height*(width+1)+y*(width+1)+x]
+ mac_v[z*(height+1)*width+(y+1)*width+x] - mac_v[z*(height+1)*width+y*width+x]
+ mac_w[(z+1)*height*width+y*width+x] - mac_w[z*height*width+y*width+x];
}
}
__global__
void Calculate_Divergence_of_MAC_FaceRatio_Kernel(float* divergence, const float* mac_u, const float* mac_v, const float* mac_w,
const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
divergence[z*height*width+y*width+x] =
mac_u[z*height*(width+1)+y*(width+1)+x+1]*unoccupyU[z*height*(width+1)+y*(width+1)+x+1]
- mac_u[z*height*(width+1)+y*(width+1)+x]*unoccupyU[z*height*(width+1)+y*(width+1)+x]
+ mac_v[z*(height+1)*width+(y+1)*width+x]*unoccupyV[z*(height+1)*width+(y+1)*width+x]
- mac_v[z*(height+1)*width+y*width+x]*unoccupyV[z*(height+1)*width+y*width+x]
+ mac_w[(z+1)*height*width+y*width+x]*unoccupyW[(z+1)*height*width+y*width+x]
- mac_w[z*height*width+y*width+x]*unoccupyW[z*height*width+y*width+x];
}
}
__global__
void Adjust_MAC_u_OpenPoisson_Kernel(float* mac_u, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
else if(x == width)
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
else
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
__global__
void Adjust_MAC_v_OpenPoisson_Kernel(float* mac_v, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
else if(y == height)
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
else
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
__global__
void Adjust_MAC_w_OpenPoisson_Kernel(float* mac_w, const float* p, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
mac_w[y*width+x] -= p[y*width+x] - 0;
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
__global__
void Adjust_MAC_u_OpenPoisson_occupy_Kernel(float* mac_u, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(!occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
}
else if(x == width)
{
if(!occupy[z*height*width+y*width+x-1])
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
}
else
{
if(!occupy[z*height*width+y*width+x-1] && !occupy[z*height*width+y*width+x])
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
}
__global__
void Adjust_MAC_v_OpenPoisson_occupy_Kernel(float* mac_v, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(!occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
}
else if(y == height)
{
if(!occupy[z*height*width+(y-1)*width+x])
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
}
else
{
if(!occupy[z*height*width+(y-1)*width+x] && !occupy[z*height*width+y*width+x])
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
}
__global__
void Adjust_MAC_w_OpenPoisson_occupy_Kernel(float* mac_w, const float* p, const bool* occupy, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
if(!occupy[y*width+x])
mac_w[y*width+x] -= p[y*width+x] - 0;
if(!occupy[(depth-1)*height*width+y*width+x])
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
if(!occupy[(z-1)*height*width+y*width+x] && !occupy[z*height*width+y*width+x])
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
__global__
void Adjust_MAC_u_OpenPoisson_FaceRatio_Kernel(float* mac_u, const float* p, const float* unoccupyU, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x > width || y >= height)
return ;
for(int z = 0;z < depth;z++)
{
if(x == 0)
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - 0;
}
else if(x == width)
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= 0 - p[z*height*width+y*width+x-1];
}
else
{
if(unoccupyU[z*height*(width+1)+y*(width+1)+x] != 0)
mac_u[z*height*(width+1)+y*(width+1)+x] -= p[z*height*width+y*width+x] - p[z*height*width+y*width+x-1];
}
}
}
__global__
void Adjust_MAC_v_OpenPoisson_FaceRatio_Kernel(float* mac_v, const float* p, const float* unoccupyV, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y > height)
return ;
for(int z = 0;z < depth;z++)
{
if(y == 0)
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - 0;
}
else if(y == height)
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= 0 - p[z*height*width+(y-1)*width+x];
}
else
{
if(unoccupyV[z*(height+1)*width+y*width+x] != 0)
mac_v[z*(height+1)*width+y*width+x] -= p[z*height*width+y*width+x] - p[z*height*width+(y-1)*width+x];
}
}
}
__global__
void Adjust_MAC_w_OpenPoisson_FaceRatio_Kernel(float* mac_w, const float* p, const float* unoccupyW, const int width, const int height, const int depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
if(unoccupyW[y*width+x] != 0)
mac_w[y*width+x] -= p[y*width+x] - 0;
if(unoccupyW[depth*height*width+y*width+x] != 0)
mac_w[depth*height*width+y*width+x] -= 0 - p[(depth-1)*height*width+y*width+x];
for(int z = 1;z < depth;z++)
{
if(unoccupyW[z*height*width+y*width+x] != 0)
mac_w[z*height*width+y*width+x] -= p[z*height*width+y*width+x] - p[(z-1)*height*width+y*width+x];
}
}
/*First Implementation*/
__global__
void SolvePressure_OpenPoisson_RedBlack_Kernel(float* p, const float* divergence, const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start; z < depth;z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
coeff = 6;
if(z == 0)
{
sigma += p[offset+height*width];
}
else if(z == depth-1)
{
sigma += p[offset-height*width];
}
else
{
sigma += p[offset-height*width]+p[offset+height*width];
}
if(y == 0)
{
sigma += p[offset+width];
}
else if(y == height-1)
{
sigma += p[offset-width];
}
else
{
sigma += p[offset-width]+p[offset+width];
}
if(x == 0)
{
sigma += p[offset+1];
}
else if(x == width-1)
{
sigma += p[offset-1];
}
else
{
sigma += p[offset+1]+p[offset-1];
}
sigma -= divergence[offset];
p[offset] = sigma/coeff;
}
}
__global__
void SolvePressure_OpenPoisson_occupy_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start;z < depth;z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
if(occupy[offset])
{
p[offset] = 0;
continue ;
}
coeff = 6;
if(z == 0)
{
if(!occupy[offset+height*width])
sigma += p[offset+height*width];
else
coeff -= 1;
}
else if(z == depth-1)
{
if(!occupy[offset-height*width])
sigma += p[offset-height*width];
else
coeff -= 1;
}
else
{
if(!occupy[offset+height*width])
sigma += p[offset+height*width];
else
coeff -= 1;
if(!occupy[offset-height*width])
sigma += p[offset-height*width];
else
coeff -= 1;
}
if(y == 0)
{
if(!occupy[offset+width])
sigma += p[offset+width];
else
coeff -= 1;
}
else if(y == height-1)
{
if(!occupy[offset-width])
sigma += p[offset-width];
else
coeff -= 1;
}
else
{
if(!occupy[offset+width])
sigma += p[offset+width];
else
coeff -= 1;
if(!occupy[offset-width])
sigma += p[offset-width];
else
coeff -= 1;
}
if(x == 0)
{
if(!occupy[offset+1])
sigma += p[offset+1];
else
coeff -= 1;
}
else if(x == width-1)
{
if(!occupy[offset-1])
sigma += p[offset-1];
else
coeff -= 1;
}
else
{
if(!occupy[offset+1])
sigma += p[offset+1];
else
coeff -= 1;
if(!occupy[offset-1])
sigma += p[offset-1];
else
coeff -= 1;
}
sigma -= divergence[offset];
if(coeff > 0)
p[offset] = sigma/coeff;
else
p[offset] = 0;
}
}
__global__
void SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel(float* p, const float* divergence, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth, const bool redkernel)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if(x >= width || y >= height)
return ;
int rest = (x+y)%2;
int start = redkernel ? rest : (1-rest);
for(int z = start;z < depth; z += 2)
{
int offset = z*height*width+y*width+x;
float coeff = 0,sigma = 0;
if(occupy[offset])
{
p[offset] = 0;
continue ;
}
if(z == 0)
{
float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x];
sigma += cur_ratio*p[offset+height*width];
coeff += cur_ratio;
coeff += 1;
}
else if(z == depth-1)
{
float cur_ratio = unoccupyW[z*height*width+y*width+x];
sigma += cur_ratio*p[offset-height*width];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyW[(z+1)*height*width+y*width+x];
sigma += cur_ratio*p[offset+height*width];
coeff += cur_ratio;
cur_ratio = unoccupyW[z*height*width+y*width+x];
sigma += cur_ratio*p[offset-height*width];
coeff += cur_ratio;
}
if(y == 0)
{
float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x];
sigma += cur_ratio*p[offset+width];
coeff += cur_ratio;
coeff += 1;
}
else if(y == height-1)
{
float cur_ratio = unoccupyV[z*(height+1)*width+y*width+x];
sigma += cur_ratio*p[offset-width];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyV[z*(height+1)*width+(y+1)*width+x];
sigma += cur_ratio*p[offset+width];
coeff += cur_ratio;
cur_ratio = unoccupyV[z*(height+1)*width+y*width+x];
sigma += cur_ratio*p[offset-width];
coeff += cur_ratio;
}
if(x == 0)
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1];
sigma += cur_ratio*p[offset+1];
coeff += cur_ratio;
coeff += 1;
}
else if(x == width-1)
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x];
sigma += cur_ratio*p[offset-1];
coeff += cur_ratio;
coeff += 1;
}
else
{
float cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x+1];
sigma += cur_ratio*p[offset+1];
coeff += cur_ratio;
cur_ratio = unoccupyU[z*height*(width+1)+y*(width+1)+x];
sigma += cur_ratio*p[offset-1];
coeff += cur_ratio;
}
sigma -= divergence[offset];
if(coeff > 0)
p[offset] = sigma/coeff;
else
p[offset] = 0;
}
}
/**************************************************************/
void cu_Regular_to_MAC_vel(float* mac_u, float* mac_v, float* mac_w, const float* u, const float* v, const float* w, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
Regular_to_MAC_u_Kernel<<<u_gridSize,blockSize>>>(mac_u,u,width,height,depth);
Regular_to_MAC_v_Kernel<<<v_gridSize,blockSize>>>(mac_v,v,width,height,depth);
Regular_to_MAC_w_Kernel<<<w_gridSize,blockSize>>>(mac_w,w,width,height,depth);
}
void cu_MAC_to_Regular_vel(float* u, float* v, float* w, const float* mac_u, const float* mac_v, const float* mac_w, const int width, const int height, const int depth)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
MAC_to_Regular_vel_Kernel<<<gridSize,blockSize>>>(u,v,w,mac_u,mac_v,mac_w,width,height,depth);
}
/*First Implementation*/
void cu_SolveOpenPoissonRedBlack_MAC(float* mac_u, float* mac_v, float* mac_w, const int width, const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth));
Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
SolvePressure_OpenPoisson_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,width,height,depth,true);
SolvePressure_OpenPoisson_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,width,height,depth,false);
}
Adjust_MAC_u_OpenPoisson_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,width,height,depth);
Adjust_MAC_v_OpenPoisson_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,width,height,depth);
Adjust_MAC_w_OpenPoisson_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,width,height,depth);
checkCudaErrors( cudaFree(b_d) );
checkCudaErrors( cudaFree(p_d) );
b_d = 0;
p_d = 0;
}
void cu_SolveOpenPoissonRedBlack_Regular(float* u, float* v, float* w, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u = 0;
float* mac_v = 0;
float* mac_w = 0;
checkCudaErrors( cudaMalloc((void**)&mac_u,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_v,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_w,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMemset(mac_u,0,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMemset(mac_v,0,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMemset(mac_w,0,sizeof(float)*width*height*(depth+1)) );
cu_Regular_to_MAC_vel(mac_u,mac_v,mac_w,u,v,w,width,height,depth);
cu_SolveOpenPoissonRedBlack_MAC(mac_u,mac_v,mac_w,width,height,depth,maxIter);
cu_MAC_to_Regular_vel(u,v,w,mac_u,mac_v,mac_w,width,height,depth);
checkCudaErrors( cudaFree(mac_u) );
checkCudaErrors( cudaFree(mac_v) );
checkCudaErrors( cudaFree(mac_w) );
mac_u = 0;
mac_v = 0;
mac_w = 0;
}
void cu_SolveOpenPoissonRedBlackwithOccupy_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const int width, const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth));
Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
SolvePressure_OpenPoisson_occupy_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,width,height,depth,true);
SolvePressure_OpenPoisson_occupy_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,width,height,depth,false);
}
Adjust_MAC_u_OpenPoisson_occupy_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,occupy,width,height,depth);
Adjust_MAC_v_OpenPoisson_occupy_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,occupy,width,height,depth);
Adjust_MAC_w_OpenPoisson_occupy_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,occupy,width,height,depth);
checkCudaErrors( cudaFree(b_d) );
checkCudaErrors( cudaFree(p_d) );
b_d = 0;
p_d = 0;
}
void cu_SolveOpenPoissonRedBlackwithFaceRatio_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width ,const int height, const int depth, const int maxIter)
{
dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 u_gridSize((width+1+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
dim3 v_gridSize((width+blockSize.x-1)/blockSize.x,(height+1+blockSize.y-1)/blockSize.y);
dim3 w_gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);
float* b_d = 0;
float* p_d = 0;
checkCudaErrors( cudaMalloc((void**)&b_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMalloc((void**)&p_d,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(b_d,0,sizeof(float)*width*height*depth));
checkCudaErrors( cudaMemset(p_d,0,sizeof(float)*width*height*depth));
Calculate_Divergence_of_MAC_Kernel<<<gridSize,blockSize>>>(b_d,mac_u,mac_v,mac_w,width,height,depth);
for(int i = 0;i < maxIter;i++)
{
SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,unoccupyU,unoccupyV,unoccupyW,width,height,depth,true);
SolvePressure_OpenPoisson_FaceRatio_RedBlack_Kernel<<<gridSize,blockSize>>>(p_d,b_d,occupy,unoccupyU,unoccupyV,unoccupyW,width,height,depth,false);
}
Adjust_MAC_u_OpenPoisson_FaceRatio_Kernel<<<u_gridSize,blockSize>>>(mac_u,p_d,unoccupyU,width,height,depth);
Adjust_MAC_v_OpenPoisson_FaceRatio_Kernel<<<v_gridSize,blockSize>>>(mac_v,p_d,unoccupyV,width,height,depth);
Adjust_MAC_w_OpenPoisson_FaceRatio_Kernel<<<w_gridSize,blockSize>>>(mac_w,p_d,unoccupyW,width,height,depth);
checkCudaErrors( cudaFree(b_d) );
checkCudaErrors( cudaFree(p_d) );
b_d = 0;
p_d = 0;
}
/*****************************************************************************/
/*First Implementation*/
extern "C"
void SolveOpenPoissonRedBlack3D_MAC(float* mac_u, float* mac_v, float* mac_w, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlack_MAC(mac_u_d,mac_v_d,mac_w_d,width,height,depth,maxIter);
checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree(mac_u_d) );
checkCudaErrors( cudaFree(mac_v_d) );
checkCudaErrors( cudaFree(mac_w_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlack3D_Regular(float* u, float* v, float* w, const int width, const int height, const int depth, const int maxIter)
{
float* u_d = 0;
float* v_d = 0;
float* w_d = 0;
checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height*depth) );
checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height*depth) );
checkCudaErrors( cudaMalloc((void**)&w_d,sizeof(float)*width*height*depth) );
checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(w_d,w,sizeof(float)*width*height*depth,cudaMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlack_Regular(u_d,v_d,w_d,width,height,depth,maxIter);
checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(w,w_d,sizeof(float)*width*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree(u_d) );
checkCudaErrors( cudaFree(v_d) );
checkCudaErrors( cudaFree(w_d) );
u_d = 0;
v_d = 0;
w_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlackwithOccupy3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
bool* occupy_d = 0;
checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,cudaMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlackwithOccupy_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,width,height,depth,maxIter);
checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree(mac_u_d) );
checkCudaErrors( cudaFree(mac_v_d) );
checkCudaErrors( cudaFree(mac_w_d) );
checkCudaErrors( cudaFree(occupy_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
occupy_d = 0;
}
extern "C"
void SolveOpenPoissonRedBlackwithFaceRatio3D_MAC(float* mac_u, float* mac_v, float* mac_w, const bool* occupy, const float* unoccupyU, const float* unoccupyV, const float* unoccupyW,
const int width, const int height, const int depth, const int maxIter)
{
float* mac_u_d = 0;
float* mac_v_d = 0;
float* mac_w_d = 0;
bool* occupy_d = 0;
float* unoccupyU_d = 0;
float* unoccupyV_d = 0;
float* unoccupyW_d = 0;
checkCudaErrors( cudaMalloc((void**)&mac_u_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_v_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&mac_w_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(bool)*width*height*depth) );
checkCudaErrors( cudaMalloc((void**)&unoccupyU_d,sizeof(float)*(width+1)*height*depth) );
checkCudaErrors( cudaMalloc((void**)&unoccupyV_d,sizeof(float)*width*(height+1)*depth) );
checkCudaErrors( cudaMalloc((void**)&unoccupyW_d,sizeof(float)*width*height*(depth+1)) );
checkCudaErrors( cudaMemcpy(mac_u_d,mac_u,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_v_d,mac_v,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(mac_w_d,mac_w,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(bool)*width*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(unoccupyU_d,unoccupyU,sizeof(float)*(width+1)*height*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(unoccupyV_d,unoccupyV,sizeof(float)*width*(height+1)*depth,cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(unoccupyW_d,unoccupyW,sizeof(float)*width*height*(depth+1),cudaMemcpyHostToDevice) );
cu_SolveOpenPoissonRedBlackwithFaceRatio_MAC(mac_u_d,mac_v_d,mac_w_d,occupy_d,unoccupyU_d,unoccupyV_d,unoccupyW_d,width,height,depth,maxIter);
checkCudaErrors( cudaMemcpy(mac_u,mac_u_d,sizeof(float)*(width+1)*height*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_v,mac_v_d,sizeof(float)*width*(height+1)*depth,cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(mac_w,mac_w_d,sizeof(float)*width*height*(depth+1),cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree(mac_u_d) );
checkCudaErrors( cudaFree(mac_v_d) );
checkCudaErrors( cudaFree(mac_w_d) );
checkCudaErrors( cudaFree(occupy_d) );
checkCudaErrors( cudaFree(unoccupyU_d) );
checkCudaErrors( cudaFree(unoccupyV_d) );
checkCudaErrors( cudaFree(unoccupyW_d) );
mac_u_d = 0;
mac_v_d = 0;
mac_w_d = 0;
occupy_d = 0;
unoccupyU_d = 0;
unoccupyV_d = 0;
unoccupyW_d = 0;
}
}
#endif |
1d2c2ea88188fb9070854dec4c1090427174909e.hip | // !!! This is a file automatically generated by hipify!!!
#include "learn_kernels.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
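// Multiplies each incoming gradient by the sigmoid derivative, act * (1 - act),
// using a grid-stride loop so any launch configuration covers all `len` elements.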
__global__ void kMultiplyBySigmoidGrad(float* act, float* target, const unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(unsigned int i = idx; i < len; i+= numThreads) {
target[i] = target[i] * act[i] * (1.0f - act[i]);
}
}
| 1d2c2ea88188fb9070854dec4c1090427174909e.cu | #include "learn_kernels.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
__global__ void kMultiplyBySigmoidGrad(float* act, float* target, const unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(unsigned int i = idx; i < len; i+= numThreads) {
target[i] = target[i] * act[i] * (1.0f - act[i]);
}
}
|
538f2ff8e5be4da65da70f64fff5d68079f5c140.hip | // !!! This is a file automatically generated by hipify!!!
#include "common_hip.cuh"
#include "random_hip.cuh"
rtDeclareVariable(float, offset, , );
rtDeclareVariable(float, windowWidth, , );
rtBuffer<Light> lights;
rtTextureSampler<float4, 2> sampler;
rtDeclareVariable(float3, org, , );
rtDeclareVariable(float4, ambient, , );
rtDeclareVariable(float4, specular, , );
rtDeclareVariable(float4, diffuse, , );
rtDeclareVariable(float, shineExponent, , );
rtDeclareVariable(float, reflectIntensity, , );
rtDeclareVariable(float, refractIndex, , );
rtDeclareVariable(float4, sceneAmbient, , );
rtDeclareVariable(rtObject, sceneObj, , );
rtDeclareVariable(int, enableReflections, , );
rtDeclareVariable(int, maxReflections, , );
rtDeclareVariable(int, enableRefractions, , );
rtDeclareVariable(int, maxRefractions, , );
rtDeclareVariable(int, enableAo, , );
rtDeclareVariable(float, aoRadius, , );
rtDeclareVariable(float, aoPower, , );
rtDeclareVariable(int, aoSamples, , );
rtDeclareVariable(int, aoSamplesSqrt, , );
rtTextureSampler<float4, 2> aoNoise;
rtDeclareVariable(float, aoNoiseScale, , );
rtDeclareVariable(float3, normal, attribute normal, );
rtDeclareVariable(float2, texCoord, attribute texCoord, );
rtDeclareVariable(Ray, ray, rtCurrentRay, );
rtDeclareVariable(RayColorData, curColorData, rtPayload, );
rtDeclareVariable(RayShadowData, curShadowData, rtPayload, );
rtDeclareVariable(uint2, launchIndex, rtLaunchIndex, );
rtDeclareVariable(uint2, launchDim, rtLaunchDim, );
RT_PROGRAM void anyHit() {
//curShadowData.attenuation *= 1.f - diffuse.w * tex2D(sampler, texCoord.x, texCoord.y).w;
curShadowData.attenuation = 0.f;
if (curShadowData.attenuation == 0.f)
rtTerminateRay();
}
RT_PROGRAM void closestHit() {
// Set hit properties
float3 hitPos = ray.origin + ray.tmax * ray.direction;
float3 toEye = normalize(org - hitPos);
// Calculate color
float4 texture = diffuse * tex2D(sampler, texCoord.x, texCoord.y);
float transparency = 1.f - texture.w;
float4 totalDiffuse, totalSpecular, totalReflect, totalRefract;
totalDiffuse = totalSpecular = totalReflect = totalRefract = make_float4(0.f);
//// Light contribution ////
for (int i = 0; i < lights.size(); i++) {
Light light = lights[i];
float3 incidence = normalize(light.position - hitPos);
// Calculate attenuation (falloff)
float distance = length(light.position - hitPos);
float attenuation = max(1.f - distance / light.range, 0.f);
if (attenuation > 0.0) {
// Cast ray to find blocking objects
RayShadowData shadowData;
shadowData.attenuation = attenuation;
Ray shadowRay(hitPos, incidence, 1, 0.01f, distance);
rtTrace(sceneObj, shadowRay, shadowData);
// The ray was not fully absorbed, add light contribution
if (shadowData.attenuation > 0.f) {
// Diffuse factor
float diffuseFactor = max(dot(normal, incidence), 0.f) * shadowData.attenuation;
totalDiffuse += diffuseFactor * light.color;
// Specular factor
if (shineExponent > 0.f) {
float3 reflection = -reflect(incidence, normal);
float specularFactor = pow(max(dot(reflection, toEye), 0.f), shineExponent) * shadowData.attenuation;
totalSpecular += specularFactor * specular;
}
}
}
}
//// Reflection ////
if (enableReflections && reflectIntensity > 0.f && curColorData.reflectDepth < maxReflections) {
float3 reflectVector = reflect(ray.direction, normal);
RayColorData reflectData;
reflectData.reflectDepth = curColorData.reflectDepth + 1;
reflectData.refractDepth = curColorData.refractDepth;
Ray reflectRay(hitPos, reflectVector, 0, 0.01f);
rtTrace(sceneObj, reflectRay, reflectData);
totalReflect = reflectData.result * reflectIntensity;
}
//// Refraction ////
if (enableRefractions && transparency > 0.f && curColorData.refractDepth < maxRefractions) {
float3 refractVector;
if (!refract(refractVector, ray.direction, normal, refractIndex))
refractVector = ray.direction;
RayColorData refractData;
refractData.reflectDepth = curColorData.reflectDepth;
refractData.refractDepth = curColorData.refractDepth + 1;
Ray refractRay(hitPos, refractVector, 0, 0.01f);
rtTrace(sceneObj, refractRay, refractData);
totalRefract = refractData.result * transparency;
}
/// Ambient occlusion ////
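// Stratified cosine-weighted hemisphere sampling around the surface normal, jittered
// by a screen-space noise texture; every shadow ray that hits geometry within
// aoRadius darkens the surface, with the total scaled by aoPower.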
float occluded = 0.f;
if (enableAo) {
float invSamples = 1.f / aoSamples;
float invSamplesSqrt = 1.f / aoSamplesSqrt;
float2 noiseTexCoord = (make_float2(launchIndex) / make_float2(launchDim)) * aoNoiseScale;
float4 noise = tex2D(aoNoise, noiseTexCoord.x, noiseTexCoord.y);
Onb onb(normal);
for (int i = 0; i < aoSamples; i++) {
float u1 = (float(i % aoSamplesSqrt) + noise.x) * invSamplesSqrt;
float u2 = (float(i / aoSamplesSqrt) + noise.y) * invSamplesSqrt;
float3 sampleVector;
cosine_sample_hemisphere(u1, u2, sampleVector);
onb.inverse_transform(sampleVector);
RayShadowData sampleData;
sampleData.attenuation = 1.f;
Ray sampleRay(hitPos, sampleVector, 1, 0.01f, aoRadius);
rtTrace(sceneObj, sampleRay, sampleData);
occluded += 1.f - sampleData.attenuation;
}
occluded *= invSamples * aoPower;
}
// Create color
curColorData.result = texture * (sceneAmbient + ambient + totalDiffuse) * (1.f - occluded) * (1.f - transparency) + totalSpecular + totalReflect + totalRefract;
curColorData.result.w = 1.f;
}
| 538f2ff8e5be4da65da70f64fff5d68079f5c140.cu | #include "common.cuh"
#include "random.cuh"
rtDeclareVariable(float, offset, , );
rtDeclareVariable(float, windowWidth, , );
rtBuffer<Light> lights;
rtTextureSampler<float4, 2> sampler;
rtDeclareVariable(float3, org, , );
rtDeclareVariable(float4, ambient, , );
rtDeclareVariable(float4, specular, , );
rtDeclareVariable(float4, diffuse, , );
rtDeclareVariable(float, shineExponent, , );
rtDeclareVariable(float, reflectIntensity, , );
rtDeclareVariable(float, refractIndex, , );
rtDeclareVariable(float4, sceneAmbient, , );
rtDeclareVariable(rtObject, sceneObj, , );
rtDeclareVariable(int, enableReflections, , );
rtDeclareVariable(int, maxReflections, , );
rtDeclareVariable(int, enableRefractions, , );
rtDeclareVariable(int, maxRefractions, , );
rtDeclareVariable(int, enableAo, , );
rtDeclareVariable(float, aoRadius, , );
rtDeclareVariable(float, aoPower, , );
rtDeclareVariable(int, aoSamples, , );
rtDeclareVariable(int, aoSamplesSqrt, , );
rtTextureSampler<float4, 2> aoNoise;
rtDeclareVariable(float, aoNoiseScale, , );
rtDeclareVariable(float3, normal, attribute normal, );
rtDeclareVariable(float2, texCoord, attribute texCoord, );
rtDeclareVariable(Ray, ray, rtCurrentRay, );
rtDeclareVariable(RayColorData, curColorData, rtPayload, );
rtDeclareVariable(RayShadowData, curShadowData, rtPayload, );
rtDeclareVariable(uint2, launchIndex, rtLaunchIndex, );
rtDeclareVariable(uint2, launchDim, rtLaunchDim, );
RT_PROGRAM void anyHit() {
//curShadowData.attenuation *= 1.f - diffuse.w * tex2D(sampler, texCoord.x, texCoord.y).w;
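// Hard shadows: the texture-based attenuation above is disabled, so any hit fully blocks the light and traversal can stop immediately.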
curShadowData.attenuation = 0.f;
if (curShadowData.attenuation == 0.f)
rtTerminateRay();
}
RT_PROGRAM void closestHit() {
// Set hit properties
float3 hitPos = ray.origin + ray.tmax * ray.direction;
float3 toEye = normalize(org - hitPos);
// Calculate color
float4 texture = diffuse * tex2D(sampler, texCoord.x, texCoord.y);
float transparency = 1.f - texture.w;
float4 totalDiffuse, totalSpecular, totalReflect, totalRefract;
totalDiffuse = totalSpecular = totalReflect = totalRefract = make_float4(0.f);
//// Light contribution ////
for (int i = 0; i < lights.size(); i++) {
Light light = lights[i];
float3 incidence = normalize(light.position - hitPos);
// Calculate attenuation (falloff)
float distance = length(light.position - hitPos);
float attenuation = max(1.f - distance / light.range, 0.f);
if (attenuation > 0.0) {
// Cast ray to find blocking objects
RayShadowData shadowData;
shadowData.attenuation = attenuation;
Ray shadowRay(hitPos, incidence, 1, 0.01f, distance);
rtTrace(sceneObj, shadowRay, shadowData);
// The ray was not fully absorbed, add light contribution
if (shadowData.attenuation > 0.f) {
// Diffuse factor
float diffuseFactor = max(dot(normal, incidence), 0.f) * shadowData.attenuation;
totalDiffuse += diffuseFactor * light.color;
// Specular factor
if (shineExponent > 0.f) {
float3 reflection = -reflect(incidence, normal);
float specularFactor = pow(max(dot(reflection, toEye), 0.f), shineExponent) * shadowData.attenuation;
totalSpecular += specularFactor * specular;
}
}
}
}
//// Reflection ////
if (enableReflections && reflectIntensity > 0.f && curColorData.reflectDepth < maxReflections) {
float3 reflectVector = reflect(ray.direction, normal);
RayColorData reflectData;
reflectData.reflectDepth = curColorData.reflectDepth + 1;
reflectData.refractDepth = curColorData.refractDepth;
Ray reflectRay(hitPos, reflectVector, 0, 0.01f);
rtTrace(sceneObj, reflectRay, reflectData);
totalReflect = reflectData.result * reflectIntensity;
}
//// Refraction ////
if (enableRefractions && transparency > 0.f && curColorData.refractDepth < maxRefractions) {
float3 refractVector;
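// refract() reports total internal reflection by returning false; in that case the ray is simply passed straight through.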
if (!refract(refractVector, ray.direction, normal, refractIndex))
refractVector = ray.direction;
RayColorData refractData;
refractData.reflectDepth = curColorData.reflectDepth;
refractData.refractDepth = curColorData.refractDepth + 1;
Ray refractRay(hitPos, refractVector, 0, 0.01f);
rtTrace(sceneObj, refractRay, refractData);
totalRefract = refractData.result * transparency;
}
//// Ambient occlusion ////
float occluded = 0.f;
if (enableAo) {
float invSamples = 1.f / aoSamples;
float invSamplesSqrt = 1.f / aoSamplesSqrt;
float2 noiseTexCoord = (make_float2(launchIndex) / make_float2(launchDim)) * aoNoiseScale;
float4 noise = tex2D(aoNoise, noiseTexCoord.x, noiseTexCoord.y);
Onb onb(normal);
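// Stratified cosine-weighted hemisphere sampling around the surface normal; the per-pixel noise value jitters each stratum to decorrelate neighbouring pixels.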
for (int i = 0; i < aoSamples; i++) {
float u1 = (float(i % aoSamplesSqrt) + noise.x) * invSamplesSqrt;
float u2 = (float(i / aoSamplesSqrt) + noise.y) * invSamplesSqrt;
float3 sampleVector;
cosine_sample_hemisphere(u1, u2, sampleVector);
onb.inverse_transform(sampleVector);
RayShadowData sampleData;
sampleData.attenuation = 1.f;
Ray sampleRay(hitPos, sampleVector, 1, 0.01f, aoRadius);
rtTrace(sceneObj, sampleRay, sampleData);
occluded += 1.f - sampleData.attenuation;
}
occluded *= invSamples * aoPower;
}
// Create color
curColorData.result = texture * (sceneAmbient + ambient + totalDiffuse) * (1.f - occluded) * (1.f - transparency) + totalSpecular + totalReflect + totalRefract;
curColorData.result.w = 1.f;
}
|
db48378bbfaeca15e883f6c363df9526182ae6ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The "wb.h" file(library) has been included in this code
First compile dataset_generator.cpp. You may use any no. of pixels in the x and y dimensions.
The dataset_generator will output input.ppm and output.ppm
Compile this file using "./a.out input.ppm output.ppm"
*/
#include "wb.h"
#define BLUR_SIZE 5
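// Each output pixel averages a (2*BLUR_SIZE+1) x (2*BLUR_SIZE+1) = 11x11 window, clipped at the image borders.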
//@@ INSERT CODE HERE
__global__ void ImageBlurKernel(float* deviceInputImageData,float* deviceOutputImageData,int imageHeight,int imageWidth)
{
int row=blockIdx.y*blockDim.y+threadIdx.y;
int col=blockIdx.x*blockDim.x+threadIdx.x;
/*Error checking is required because matrices need not fit in exact tiles*/
if(row<imageHeight && col<imageWidth)
{
float pixVal = 0;
int pixels = 0;
for(int blurrow=-BLUR_SIZE;blurrow<BLUR_SIZE+1;blurrow++)
{
for (int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE + 1;blurcol++)
{
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow>-1 && currow<imageHeight && curcol>-1 && curcol<imageWidth)
{
pixVal+=deviceInputImageData[currow*imageWidth+curcol];
pixels++;
}
}
}
deviceOutputImageData[row*imageWidth+col]=float(pixVal)/pixels;
}
}
int main(int argc, char *argv[]) {
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
wbArg_t args;
args = wbArg_read(argc, argv);
/* parse the input arguments */
//@@ Insert code here
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
// The input image is in grayscale, so the number of channels
// is 1
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// Since the image is monochromatic, it contains only one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(float));
hipMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 dimBlock(32,32);
dim3 dimGrid(ceil(imageWidth/32.0),ceil(imageHeight/32.0));
hipLaunchKernelGGL(( ImageBlurKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData,deviceOutputImageData,imageHeight,imageWidth);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| db48378bbfaeca15e883f6c363df9526182ae6ba.cu | /*
The "wb.h" file(library) has been included in this code
First compile dataset_generator.cpp. You may use any no. of pixels in the x and y dimensions.
The dataset_generator will output input.ppm and output.ppm
Compile this file using "./a.out input.ppm output.ppm"
*/
#include "wb.h"
#define BLUR_SIZE 5
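// Each output pixel averages a (2*BLUR_SIZE+1) x (2*BLUR_SIZE+1) = 11x11 window, clipped at the image borders.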
//@@ INSERT CODE HERE
__global__ void ImageBlurKernel(float* deviceInputImageData,float* deviceOutputImageData,int imageHeight,int imageWidth)
{
int row=blockIdx.y*blockDim.y+threadIdx.y;
int col=blockIdx.x*blockDim.x+threadIdx.x;
/*Error checking is required because matrices need not fit in exact tiles*/
if(row<imageHeight && col<imageWidth)
{
float pixVal = 0;
int pixels = 0;
for(int blurrow=-BLUR_SIZE;blurrow<BLUR_SIZE+1;blurrow++)
{
for (int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE + 1;blurcol++)
{
int currow = row + blurrow;
int curcol = col + blurcol;
if(currow>-1 && currow<imageHeight && curcol>-1 && curcol<imageWidth)
{
pixVal+=deviceInputImageData[currow*imageWidth+curcol];
pixels++;
}
}
}
deviceOutputImageData[row*imageWidth+col]=float(pixVal)/pixels;
}
}
int main(int argc, char *argv[]) {
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *deviceInputImageData;
float *deviceOutputImageData;
wbArg_t args;
args = wbArg_read(argc, argv);
/* parse the input arguments */
//@@ Insert code here
inputImageFile = wbArg_getInputFile(args, 0);
inputImage = wbImport(inputImageFile);
// The input image is in grayscale, so the number of channels
// is 1
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// Since the image is monochromatic, it contains only one channel
outputImage = wbImage_new(imageWidth, imageHeight, 1);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
dim3 dimBlock(32,32);
dim3 dimGrid(ceil(imageWidth/32.0),ceil(imageHeight/32.0));
ImageBlurKernel<<<dimGrid,dimBlock>>>(deviceInputImageData,deviceOutputImageData,imageHeight,imageWidth);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
b852f0c63a9cab409df7aa522a7d5127a22d6595.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TILE_SIZE 32
__device__ unsigned int Idx3D(int nx, int ny, int i, int j, int k) {
return k*nx*ny + j*nx + i;
}
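// 7-point 3D stencil: each thread sweeps one (i,j) column along z, staging the current x-y slice in shared memory while the z-1/z+1 neighbours rotate through the bottom/center/top registers.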
__global__ void stencilKernel(float *A0, float *Anext, int nx, int ny, int nz) {
__shared__ float ds_A[TILE_SIZE][TILE_SIZE];
unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y;
unsigned int dx = blockDim.x; unsigned int dy = blockDim.y;
unsigned int i = blockIdx.x * dx + tx; unsigned int j = blockIdx.y * dy + ty;
if((i < nx) && (j < ny)) {
float bottom = A0[Idx3D(nx, ny, i, j, 0)];
float center = A0[Idx3D(nx, ny, i, j, 1)];
float top = A0[Idx3D(nx, ny, i, j, 2)];
for(int k = 1; k < nz-1; k++) {
ds_A[ty][tx] = center;
__syncthreads();
Anext[Idx3D(nx, ny, i, j, k)] = bottom + top - 6 * center +
((tx>0) ? ds_A[ty][tx-1] : (i==0) ? 0 : A0[Idx3D(nx,ny,i-1,j,k)]) +
((tx<dx-1) ? ds_A[ty][tx+1] : (i==nx-1) ? 0 : A0[Idx3D(nx,ny,i+1,j,k)]) +
((ty>0) ? ds_A[ty-1][tx] : (j==0) ? 0 : A0[Idx3D(nx,ny,i,j-1,k)]) +
((ty<dy-1) ? ds_A[ty+1][tx] : (j==ny-1) ? 0 : A0[Idx3D(nx,ny,i,j+1,k)]);
bottom = center;
center = top;
__syncthreads();
if(k + 2 < nz) top = A0[Idx3D(nx, ny, i, j, k+2)];
}
}
}
| b852f0c63a9cab409df7aa522a7d5127a22d6595.cu | #define TILE_SIZE 32
__device__ unsigned int Idx3D(int nx, int ny, int i, int j, int k) {
return k*nx*ny + j*nx + i;
}
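// 7-point 3D stencil: each thread sweeps one (i,j) column along z, staging the current x-y slice in shared memory while the z-1/z+1 neighbours rotate through the bottom/center/top registers.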
__global__ void stencilKernel(float *A0, float *Anext, int nx, int ny, int nz) {
__shared__ float ds_A[TILE_SIZE][TILE_SIZE];
unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y;
unsigned int dx = blockDim.x; unsigned int dy = blockDim.y;
unsigned int i = blockIdx.x * dx + tx; unsigned int j = blockIdx.y * dy + ty;
if((i < nx) && (j < ny)) {
float bottom = A0[Idx3D(nx, ny, i, j, 0)];
float center = A0[Idx3D(nx, ny, i, j, 1)];
float top = A0[Idx3D(nx, ny, i, j, 2)];
for(int k = 1; k < nz-1; k++) {
ds_A[ty][tx] = center;
__syncthreads();
Anext[Idx3D(nx, ny, i, j, k)] = bottom + top - 6 * center +
((tx>0) ? ds_A[ty][tx-1] : (i==0) ? 0 : A0[Idx3D(nx,ny,i-1,j,k)]) +
((tx<dx-1) ? ds_A[ty][tx+1] : (i==nx-1) ? 0 : A0[Idx3D(nx,ny,i+1,j,k)]) +
((ty>0) ? ds_A[ty-1][tx] : (j==0) ? 0 : A0[Idx3D(nx,ny,i,j-1,k)]) +
((ty<dy-1) ? ds_A[ty+1][tx] : (j==ny-1) ? 0 : A0[Idx3D(nx,ny,i,j+1,k)]);
bottom = center;
center = top;
__syncthreads();
if(k + 2 < nz) top = A0[Idx3D(nx, ny, i, j, k+2)];
}
}
}
|