hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
671fb9d9ac0d6940c97ffbfd0cacfece74c2e38d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* TGSimulator.cpp
*
* Created on: 22/mag/2018
* Author: Sabrina
*/
#include "TGSimulator.h"
#include <thrust/system/hip/execution_policy.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
TGSimulator::TGSimulator(){
// TODO Auto-generated constructor stub
}
TGSimulator::TGSimulator(unsigned int npart): m_numCells(npart) {
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&m_VCells, npart*sizeof(point4d));
hipMallocManaged(&m_VType, npart*sizeof(int));
hipDeviceSynchronize();
}
TGSimulator::TGSimulator(char * fileName, bool scaleNumber){
bool debugAdCell = false;
bool checkBorder=false;
bool checkGrid=false;
//////////////
//initialize data from vtk file
//////////////
std::ifstream fileCoord, fileData;
fileCoord.open(fileName);
std::string line;
std::string a, b, c;
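// skip the VTK header (the code assumes the legacy layout: version, title, format, dataset type), then read the "POINTS <n> <type>" line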
for(int i=0; i<4 ; i++)getline(fileCoord,line);
fileCoord >> a >> b >> c;
//get total number of cells
m_numCells = std::atoi(b.c_str());
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&m_VCells, m_numCells*sizeof(point4d));
hipMallocManaged(&m_VType, m_numCells*sizeof(int));
hipDeviceSynchronize();
//pointer to the radius values
fileData.open(fileName);
std::string line2, r;
std::string word("radius");
std::size_t found;
do{
getline(fileData,line2);
found = line2.find(word);
}while(found==std::string::npos);
getline(fileData,line2);
fileData >> r;
//initialize vector with cells' data
fileCoord >> a >> b >> c;
int i = 0;
while(a !="POINT_DATA"){
if(scaleNumber){ m_VCells[i]=point4d(atof(a.c_str())*pow(10,-6),atof(b.c_str())*pow(10,-6),atof(c.c_str())*pow(10,-6),atof(r.c_str())*pow(10,-6));}
else m_VCells[i]=point4d(atof(a.c_str()),atof(b.c_str()),atof(c.c_str()),atof(r.c_str()));
if (checkGrid) std::cout << "( " << atof(a.c_str()) << ", " << atof(b.c_str()) << ", " << atof(c.c_str()) << ", " << atof(r.c_str()) << ")" << std::endl;
fileCoord >> a >> b >> c;
fileData >> r;
i++;
}
//initialize the cell-type vector with a default value
for(int i = 0; i < m_numCells; i++){
m_VType[i] = 0;
}
}
TGSimulator:: TGSimulator(unsigned int npart, std::vector<double>& x, std::vector<double>& y ,std::vector<double>& z ,std::vector<double>& r ,std::vector<int>& t):
m_numCells(npart) {
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&m_VCells, npart*sizeof(point4d));
hipMallocManaged(&m_VType, npart*sizeof(int));
hipDeviceSynchronize();
// Initialize data
for(int i = 0; i < npart; i++){
m_VCells[i] = point4d(x[i],y[i],z[i],r[i]);
m_VType[i] = t[i];
}
}
TGSimulator::~TGSimulator() {
// Free memory
hipFree(m_VCells);
hipFree(m_VType);
}
/*void TGSimulator::Set_Npart(unsigned int npart)
{
m_numCells = npart;
}*/
/*
thrust::host_vector<point4d>& TGSimulator::Get_dTohCells(){
m_hCells = m_dCells; // (the device-to-host data transfer takes a disproportionately long time)
return m_hCells;
}
void TGSimulator::Get_dCells(std::vector<point4d>& hCells){
m_hCells = m_dCells; //! warning: do this only when necessary.
for(int i = 0; i < m_numCells; i++)
{
hCells[i] = m_hCells[i];
}
//does not work!!
//thrust::copy(m_hCells.begin(), m_hCells.end(), hCells.begin());
return ;
}
*/
void TGSimulator::PrintCellsPosition(){
for(int i = 0; i < m_numCells; i++) {
std::cout << "Cell" << i <<" " ;
m_VCells[i].printCoord();
}
}
| 671fb9d9ac0d6940c97ffbfd0cacfece74c2e38d.cu | /*
* TGSimulator.cpp
*
* Created on: 22/mag/2018
* Author: Sabrina
*/
#include "TGSimulator.h"
#include <thrust/system/cuda/execution_policy.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
TGSimulator::TGSimulator(){
// TODO Auto-generated constructor stub
}
TGSimulator::TGSimulator(unsigned int npart): m_numCells(npart) {
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&m_VCells, npart*sizeof(point4d));
cudaMallocManaged(&m_VType, npart*sizeof(int));
cudaDeviceSynchronize();
}
TGSimulator::TGSimulator(char * fileName, bool scaleNumber){
bool debugAdCell = false;
bool checkBorder=false;
bool checkGrid=false;
//////////////
//initialize data from vtk file
//////////////
std::ifstream fileCoord, fileData;
fileCoord.open(fileName);
std::string line;
std::string a, b, c;
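// skip the VTK header (the code assumes the legacy layout: version, title, format, dataset type), then read the "POINTS <n> <type>" line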
for(int i=0; i<4 ; i++)getline(fileCoord,line);
fileCoord >> a >> b >> c;
//get total number of cells
m_numCells = std::atoi(b.c_str());
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&m_VCells, m_numCells*sizeof(point4d));
cudaMallocManaged(&m_VType, m_numCells*sizeof(int));
cudaDeviceSynchronize();
//pointer to the radius values
fileData.open(fileName);
std::string line2, r;
std::string word("radius");
std::size_t found;
do{
getline(fileData,line2);
found = line2.find(word);
}while(found==std::string::npos);
getline(fileData,line2);
fileData >> r;
//initialize vector with cells' data
fileCoord >> a >> b >> c;
int i = 0;
while(a !="POINT_DATA"){
if(scaleNumber){ m_VCells[i]=point4d(atof(a.c_str())*pow(10,-6),atof(b.c_str())*pow(10,-6),atof(c.c_str())*pow(10,-6),atof(r.c_str())*pow(10,-6));}
else m_VCells[i]=point4d(atof(a.c_str()),atof(b.c_str()),atof(c.c_str()),atof(r.c_str()));
if (checkGrid) std::cout << "( " << atof(a.c_str()) << ", " << atof(b.c_str()) << ", " << atof(c.c_str()) << ", " << atof(r.c_str()) << ")" << std::endl;
fileCoord >> a >> b >> c;
fileData >> r;
i++;
}
//initialize the cell-type vector with a default value
for(int i = 0; i < m_numCells; i++){
m_VType[i] = 0;
}
}
TGSimulator:: TGSimulator(unsigned int npart, std::vector<double>& x, std::vector<double>& y ,std::vector<double>& z ,std::vector<double>& r ,std::vector<int>& t):
m_numCells(npart) {
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&m_VCells, npart*sizeof(point4d));
cudaMallocManaged(&m_VType, npart*sizeof(int));
cudaDeviceSynchronize();
// Initialize data
for(int i = 0; i < npart; i++){
m_VCells[i] = point4d(x[i],y[i],z[i],r[i]);
m_VType[i] = t[i];
}
}
TGSimulator::~TGSimulator() {
// Free memory
cudaFree(m_VCells);
cudaFree(m_VType);
}
/*void TGSimulator::Set_Npart(unsigned int npart)
{
m_numCells = npart;
}*/
/*
thrust::host_vector<point4d>& TGSimulator::Get_dTohCells(){
m_hCells = m_dCells; // (the device-to-host data transfer takes a disproportionately long time)
return m_hCells;
}
void TGSimulator::Get_dCells(std::vector<point4d>& hCells){
m_hCells = m_dCells; //! warning: do this only when necessary.
for(int i = 0; i < m_numCells; i++)
{
hCells[i] = m_hCells[i];
}
//does not work!!
//thrust::copy(m_hCells.begin(), m_hCells.end(), hCells.begin());
return ;
}
*/
void TGSimulator::PrintCellsPosition(){
for(int i = 0; i < m_numCells; i++) {
std::cout << "Cell" << i <<" " ;
m_VCells[i].printCoord();
}
}
|
2154065b14e0abd0be6c7e2fcab88278d4d71f70.hip | // !!! This is a file automatically generated by hipify!!!
// In the MPC model, change h to Q; also add the ability to modify the casting temperature
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <fstream>
#include "book.h"
#include "gridcheck.h"
using namespace std;
# define Section 12 // number of cooling sections
# define CoolSection 8
# define MoldSection 4
# define StaticIter 50
# define M 3
# define N M+2*CoolSection
# define TestIter 500
# define limit 501//limit>=tnpts/num_iter
float ccml[Section + 1] = { 0.0,0.2,0.4,0.6,0.8,1.0925,2.27,4.29,5.831,9.6065,13.6090,19.87014,28.599 }; // The cooling sections
//float H_Init[Section] = { 1380,1170,980,800,1223.16,735.05,424.32,392.83,328.94,281.64,246.16,160.96 }; // The heat transfer coefficients in the cooling sections
float H_Init[Section] = { 1400,1200,1000,800,1200,750,400,400,350,300,250,150 };
//float H_Init[Section] = { 1500,1300,1100,900,1300,850,500,500,450,400,350,250 };
//float H_Init_Temp[Section] = { 1380,1170,980,800,1223.16,735.05,424.32,392.83,328.94,281.64,246.16,160.96 }; // The heat transfer coefficients in the cooling sections
float H_Init_Temp[Section] = { 0 };
float H_Init_Final[Section] = { 1380 };
float Q_air[CoolSection] = { 200,1500,850,650,1000,850,400,480 };
float Taim[CoolSection] = { 966.149841, 925.864746, 952.322083, 932.175537, 914.607117, 890.494263, 870.804443, 890.595825 };
float delta_z[Section] = {2.7,2.7,1.8,1.8,1.8,1.8,1.8,0.9};
float *Calculation_MeanTemperature(int nx, int ny, int nz, float dy, float *ccml, float *T, float num);
float *calculateThickness(float *T_result, int nx, int ny, int nz, float dy, float *ccml, float Ts, float thick);
hipError_t addWithCuda(float *T_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, int tnpts, int num_blocks, int num_threadsx, int num_threadsy);
__device__ void Physicial_Parameters(float T, float *pho, float *Ce, float *lamd);
__device__ float Boundary_Condition(int j, float dx, float *ccml_zone, float *H_Init);
float *relationshiphandQ(float *h_Init, float* Q_air);
float stop_criterion();
float update_c(float[], float c0,int iter);
void update_lamda(float[],int iter,float[]);
float alfa[limit] = { 1.0 };
float g[N] = { 0 };
float testArray[TestIter] = { 0 };
__global__ void addKernel(float *T_New, float *T_Last, float *ccml, float *H_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, bool disout,float Vcast)
{
int i = threadIdx.x;
int m = threadIdx.y;
int j = blockIdx.x;
int idx = j * nx * nz + m * nx + i;
int ND = nx * nz;
int D = nx;
float pho, Ce, lamd; // physical parameters pho represents desity, Ce is specific heat and lamd is thermal conductivity
float a, T_Up, T_Down, T_Right, T_Left, T_Forw, T_Back, h = 100.0, Tw = 30.0, T_Cast = 1558.0; //Vcast = -0.02
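// One thread per (i, m) cross-section node and one block per axial slice j; when disout is true the update
// reads T_Last and writes T_New, otherwise the two buffers swap roles (ping-pong time stepping).
// The branches below apply the interior stencil and ghost-node convective conditions (h, Tw) on every
// face, edge and corner of the 3D grid; the inlet slice j == 0 is held at the casting temperature.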
if (disout) {
Physicial_Parameters(T_Last[idx], &pho, &Ce, &lamd);
a = (lamd) / (pho*Ce);
h = Boundary_Condition(j, dy, ccml, H_Init);
if (j == 0) //1
{
T_New[idx] = T_Cast;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m != 0 && m != (nz - 1)) //10
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = (a*tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ (a*tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //11
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //12
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //13
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //14
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == 0) //15
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == (nz - 1)) //16
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == 0) //17
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == (nz - 1)) //18
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //19
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //20
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == 0) //21
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == 0) //22
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == (nz - 1)) //23
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == (nz - 1)) //24
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //25
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1] - 2 * dx * h * (T_Last[idx] - Tw) / lamd;
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //26
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1] - 2 * dx * h * (T_Last[idx] - Tw) / lamd;
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else //27
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
}
else
{
Physicial_Parameters(T_New[idx], &pho, &Ce, &lamd);
a = (lamd) / (pho*Ce);
h = Boundary_Condition(j, dy, ccml, H_Init);
if (j == 0) //1
{
T_Last[idx] = T_Cast;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m != 0 && m != (nz - 1)) //10
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //11
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //12
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //13
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //14
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == 0) //15
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == (nz - 1)) //16
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == 0) //17
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == (nz - 1)) //18
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //19
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //20
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == 0) //21
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == 0) //22
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == (nz - 1)) //23
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == (nz - 1)) //24
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //25
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1] - 2 * dx * h * (T_New[idx] - Tw) / lamd;
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //26
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1] - 2 * dx * h * (T_New[idx] - Tw) / lamd;
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else //27
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
}
}
int main()
{
const int nx = 21, ny = 3000, nz = 21; // nx is the number of grid in x direction, ny is the number of grid in y direction.
int num_blocks = 1, num_threadsx = 1, num_threadsy = 1;// num_threadsz = 1; // block number(1D) thread number in x and y dimension(2D)
int tnpts = 10001; // time step
float Lx = 0.25, Ly = 28.599, Lz = 0.25, t_final = 2000.0, dx, dy, dz, tao; // T_Cast is the casting temperature Lx and Ly is the thick and length of steel billets
float T_Cast = 1758.0;//T_Cast = 1558.0,
float *T_Init;
num_threadsx = nx;
num_threadsy = nz;
num_blocks = ny;
T_Init = (float*)calloc(nx*ny*nz,sizeof(float)); // Initial condition
for (int m = 0; m < nz; m++)
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
T_Init[nx * ny * m + j * nx + i] = T_Cast; // give the initial condition
dx = Lx / (nx - 1); // the grid size x
dy = Ly / (ny - 1); // the grid size y
dz = Lz / (nz - 1); // the grid size z
tao = t_final / (tnpts - 1); // the time step size
//gridcheck(dx, dy, tao);
cout << "Casting Temperature " << T_Cast << endl;
cout << "The length of steel billets(m) " << Ly << endl;
cout << "The width of steel billets(m) " << Lz << endl;
cout << "The thick of steel billets(m) " << Lx << endl;
cout << "dx(m) " << dx << ", ";
cout << "dy(m) " << dy << ", ";
cout << "dz(m) " << dz << ", ";
cout << "tao(s) " << tao << ", ";
cout << "simulation time(s) " << t_final << endl;
//clock_t timestart = clock();
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
hipError_t cudaStatus = addWithCuda(T_Init, dx, dy, dz, tao, nx, ny, nz, tnpts, num_blocks, num_threadsx, num_threadsy);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsetime;
HANDLE_ERROR(hipEventElapsedTime(&elapsetime, start, stop));
cout << "running time =" << (elapsetime);
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
/*clock_t timeend = clock();
cout << "running time = " << (timeend - timestart);*/
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t addWithCuda(float *T_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, int tnpts, int num_blocks, int num_threadsx, int num_threadsy)
{
float *dev_T_New, *dev_T_Last, *dev_ccml, *dev_H_Init; // the point on GPU
float *T_Result, *Delta_H_Init, *T_HoldLast,*ThickAll, **Mean_TSurfaceElement, **Mean_TSurfaceElementOne;
float *Point_TSurfaceElement, *Point_TSurfaceElementOne, **Mean_TCenterElement, **Mean_TCenterElementOne;
float **JacobianMatrix, *JacobianG0, *JacobianG1, *JacobianG2, *TZ_gradient,*partionQ;
float **JacobinTZgradient, **TZ_gradientElement, **TZ_gradientElementOne;
float dh = 10.0,dQ=1.0, arf1, arf2, step = -0.0001,T_bmax=1100,Ts=1462,Tl= 1518.0,Tu=-100,Td=200;
float Vcast = -0.02;
const int Num_Iter = 10, PrintLabel = 0;// The result can be obtained by every Num_Iter time step
volatile bool dstOut = true;
//
float c[limit] = {10};
float norm_g[limit] = { 0 };
float eps = 0.0001,c0 = 10;
float lamda[limit][N] = { 1 };
float gtest[limit][N] = { 0 };
float htest[limit][Section] = { 0 };
float fitness[limit] = { 0 };
T_Result = (float *)calloc(nx * ny * nz, sizeof(float)); // The temperature of steel billets
Delta_H_Init = (float*)calloc(CoolSection, sizeof(float));
T_HoldLast = (float*)calloc(nz * ny * nx, sizeof(float));
Point_TSurfaceElement = (float*)calloc(CoolSection, sizeof(float));
Point_TSurfaceElementOne = (float*)calloc(CoolSection, sizeof(float));
JacobianG0 = (float*)calloc(CoolSection, sizeof(float));
JacobianG1 = (float*)calloc(CoolSection, sizeof(float));
JacobianG2 = (float*)calloc(CoolSection, sizeof(float));
TZ_gradient = (float*)calloc(CoolSection, sizeof(float));
partionQ = (float*)calloc(CoolSection, sizeof(float));
ThickAll = (float*)calloc(Section, sizeof(float));
JacobianMatrix = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
JacobianMatrix[i] = (float*)calloc(CoolSection, sizeof(float));
Mean_TSurfaceElement = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
Mean_TSurfaceElement[i] = (float*)calloc(CoolSection, sizeof(float));
Mean_TSurfaceElementOne = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
Mean_TSurfaceElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
Mean_TCenterElement = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
Mean_TCenterElement[i] = (float*)calloc(CoolSection, sizeof(float));
Mean_TCenterElementOne = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
Mean_TCenterElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
JacobinTZgradient = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
JacobinTZgradient[i] = (float*)calloc(CoolSection, sizeof(float));
TZ_gradientElement = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
TZ_gradientElement[i] = (float*)calloc(CoolSection, sizeof(float));
TZ_gradientElementOne = (float**)calloc(CoolSection, sizeof(float *));
for (int i = 0; i < CoolSection; i++)
TZ_gradientElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
HANDLE_ERROR(hipSetDevice(0));
HANDLE_ERROR(hipMalloc((void**)&dev_T_New, nx * ny * nz * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_T_Last, nx * ny * nz * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_ccml, (Section + 1) * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_H_Init, Section * sizeof(float)));
HANDLE_ERROR(hipMemcpy(dev_T_Last, T_Init, nx * ny * nz * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_ccml, ccml, (Section + 1) * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_H_Init, H_Init, Section * sizeof(float), hipMemcpyHostToDevice));
dim3 threadsPerBlock(num_threadsx, num_threadsy);
float SurfaceError[TestIter / 10+1][CoolSection];
for (int t = 0; t < TestIter*10+1; t++)
{
// Optional casting-speed (Vcast) disturbance schedule, currently disabled:
//if(t / Num_Iter >= 2* StaticIter&&t / Num_Iter < 4 * StaticIter)//100-200
// Vcast = -0.017;
//else if(t / Num_Iter >= 4 * StaticIter&&t / Num_Iter < 6 * StaticIter)//200-300
// Vcast = -0.02;
//else if (t / Num_Iter >= 6 * StaticIter&&t / Num_Iter < 8 * StaticIter)//300-400
// Vcast = -0.023;
//else//400
// Vcast = -0.02;
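// Every Num_Iter time steps the controller perturbs each cooling zone's heat-transfer coefficient,
// rebuilds the Jacobians of the surface/center temperatures and axial gradients, and updates H_Init
// and the Lagrange multipliers before the simulation is advanced with the new coefficients.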
if (t % Num_Iter == 0)
{
int iter = t / Num_Iter;
HANDLE_ERROR(hipMemcpy(T_HoldLast, dev_T_Last, nx * ny * nz * sizeof(float), hipMemcpyDeviceToHost));
for (int m = 0; m < CoolSection + 1; m++)
{
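// m < CoolSection: simulate Num_Iter steps with zone m's coefficient perturbed by dh (the saved state T_HoldLast is restored afterwards);
// m == CoolSection (last pass): nominal run that provides the baseline temperatures and leaves the advanced state in the device buffers.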
if (m == CoolSection)
{
for (int temp = 0; temp < Section; temp++) {
H_Init_Temp[temp] = H_Init[temp];
}
HANDLE_ERROR(hipMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), hipMemcpyHostToDevice));
for (int PNum = 0; PNum < Num_Iter; PNum++)
{
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
}
HANDLE_ERROR(hipMemcpy(T_Result, dev_T_New, nx * ny * nz * sizeof(float), hipMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,0); // calculation the mean surface temperature of steel billets in every cooling sections
float* Mean_TPoint = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 8.0 / 250 * nx);// mean temperature at a depth of 8/250 of the slab thickness
float Point_TSurface = Mean_TPoint[MoldSection];
float *Mean_TCenter = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,nx/2);// mean centerline temperature
for (int temp = 0; temp < CoolSection; temp++) {
Point_TSurfaceElementOne[temp] = Point_TSurface;
if (iter >= StaticIter)
{
if (temp < CoolSection - 1)
TZ_gradient[temp] = (Mean_TSurface[temp + 1 + MoldSection] - Mean_TSurface[temp + MoldSection]) / delta_z[temp];
else
//TZ_gradient[temp] = -(T_Result[nx*nz*(ny - 1) + 0 * nz + (int)(nx - 1)] - Mean_TSurface[temp + MoldSection]) / delta_z[temp];
TZ_gradient[temp] = 100;
//printf("TZ_gradient=%f ", TZ_gradient[temp]);
}
for (int column = 0; column < CoolSection; column++)
{
Mean_TSurfaceElementOne[temp][column] = Mean_TSurface[column + MoldSection];
Mean_TCenterElementOne[temp][column] = Mean_TCenter[column + MoldSection];
TZ_gradientElementOne[temp][column] = TZ_gradient[column + MoldSection];
}
}
//printf("\n");
}
else
{
for (int temp = 0; temp < Section; temp++)
H_Init_Temp[temp] = H_Init[temp];
//printf(" h=%f", H_Init_Temp[m]);
H_Init_Temp[m + MoldSection] = H_Init[m + MoldSection] + dh;
HANDLE_ERROR(hipMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), hipMemcpyHostToDevice));
for (int PNum = 0; PNum < Num_Iter; PNum++)// advance the model Num_Iter steps with the perturbed coefficient
{
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
}
HANDLE_ERROR(hipMemcpy(T_Result, dev_T_New, nx * ny * nz * sizeof(float), hipMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,0); // calculation the mean surface temperature of steel billets in every cooling sections
float* Mean_TPoint = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 8.0 / 250 * nx);
Point_TSurfaceElement[m] = Mean_TPoint[m];
float *Mean_TCenter = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,nx/2);
if (iter >= 2 * StaticIter)
{
if (m < CoolSection - 1)
TZ_gradient[m] = (Mean_TSurface[m + 1 + MoldSection] - Mean_TSurface[m + MoldSection]) / delta_z[m];
else
//TZ_gradient[m] = -(T_Result[nx*nz*(ny - 1) + 0 * nz + (int)(nx - 1)] - Mean_TSurface[m + MoldSection]) / delta_z[m];
TZ_gradient[m] = 150;
}
for (int column = 0; column < CoolSection; column++)
{
Mean_TSurfaceElement[m][column] = Mean_TSurface[column + MoldSection];// perturbed-run response used for the Jacobian
Mean_TCenterElement[m][column] = Mean_TCenter[column + MoldSection];
TZ_gradientElement[m][column] = TZ_gradient[column + MoldSection];
}
}
/*for (int i = 0; i < CoolSection; i++) {
printf("TZ_gradient=%f ",TZ_gradient[i]);
}
printf("\n");*/
// Constraint values g[]: temperature limits and axial temperature-gradient bounds
if (iter >= StaticIter)
{
g[0] = Mean_TSurfaceElement[0][0] - T_bmax;
g[2] = Mean_TCenterElement[MoldSection+1][MoldSection+1] - Tl;// centerline temperature relative to the liquidus Tl
g[1] = Point_TSurfaceElement[MoldSection] - Ts;// near-surface point temperature relative to the solidus Ts
/*printf("g[0]=%f\n", g[0]);
printf("g[1]=%f\n", g[1]);
printf("g[2]=%f\n", g[2]);*/
for (int i = M; i < N; i++) {
if (i < M + CoolSection)
g[i] = Tu - TZ_gradient[i-M];
else
g[i] = TZ_gradient[i - M - CoolSection]-Td;
}
}
for (int temp = 0; temp < M; temp++)
fitness[iter] += lamda[iter][temp] * g[temp];
for (int i = 0; i < N; i++)
{
if (iter < StaticIter)
gtest[iter][i] = 0;
else
gtest[iter][i] = g[i];
}
HANDLE_ERROR(hipMemcpy(dev_T_Last, T_HoldLast, nx * ny * nz * sizeof(float), hipMemcpyHostToDevice));
}
printf("iter=%d\n", iter);
printf("g[0]=%f\n", g[0]);
// Stopping criterion: norm of the constraint vector g
if (iter >= StaticIter)
{
norm_g[iter] = stop_criterion();
norm_g[0] = norm_g[StaticIter];
}
if (norm_g[iter - 1]<eps&&iter>2*StaticIter)// stop once the constraint norm is below eps (after the warm-up iterations)
break;
// Update the penalty coefficient c
c[iter] = update_c(norm_g, c0, iter-StaticIter);
printf("c=%f\n", c[iter]);
if (iter <= StaticIter)
for (int j = 0; j < N; j++)
lamda[iter][j] = 1;
/*for (int j = 0; j < N; j++) {
printf("lamda[i]=%f\n", lamda[iter][j]);
printf("g[i]=%f\n", g[j]);
}*/
for (int j = 0; j < Section; j++) {
htest[iter][j] = H_Init_Temp[j];
}
// Finite-difference Jacobians: sensitivity of the zone temperatures and axial gradients to each zone's heat-transfer coefficient (perturbation dh)
for (int row = 0; row < CoolSection; row++)
{
for (int column = 0; column < CoolSection; column++)
{
JacobianMatrix[row][column] = (Mean_TSurfaceElement[row][column] - Mean_TSurfaceElementOne[row][column]) / dh; //1
JacobinTZgradient[row][column] = (TZ_gradientElement[row][column] - TZ_gradientElementOne[row][column]) / dh;
if (row == 0)
{
if (iter > StaticIter)
{
JacobianG0[column] = (Mean_TSurfaceElement[0][column] - Mean_TSurfaceElementOne[0][column]) / dh;
JacobianG1[column] = (Point_TSurfaceElement[column]- Point_TSurfaceElementOne[column]) / dh;
JacobianG2[column] = (Mean_TCenterElement[4][column] - Mean_TCenterElementOne[4][column]) / dh;
}
}
}
}
for (int temp = 0; temp < CoolSection; temp++) {
Delta_H_Init[temp] = 0.0;
for (int column = 0; column < CoolSection; column++)
{
Delta_H_Init[temp] += (Mean_TSurfaceElementOne[temp][column] - Taim[column]) * JacobianMatrix[temp][column];//2
if (iter > StaticIter)
{
Delta_H_Init[temp] += lamda[iter][temp + M] * JacobinTZgradient[temp][column];
Delta_H_Init[temp] +=(-1)* lamda[iter][temp + M + CoolSection] * JacobinTZgradient[temp][column];
}
}
Delta_H_Init[temp] += H_Init[temp] - H_Init_Final[temp];//h
fitness[iter] += lamda[iter][temp + M] * g[temp + M];
fitness[iter] += lamda[iter][temp + M + CoolSection] * g[temp + M + CoolSection];
fitness[iter]+= H_Init[temp] - H_Init_Final[temp];
if (iter > StaticIter)
{
Delta_H_Init[temp] += lamda[iter][0] * JacobianG0[temp];//1
Delta_H_Init[temp] += lamda[iter][1] * JacobianG1[temp];//2
Delta_H_Init[temp] += lamda[iter][2] * JacobianG2[temp];//3
}
//printf(" Delta_H_Init=%f\n", Delta_H_Init[temp]);
}
printf("\n");
// Step-size calculation along the search direction Delta_H_Init
arf1 = 0.0, arf2 = 0.0;
for (int temp = 0; temp < CoolSection; temp++)
{
for (int column = 0; column < CoolSection; column++)
{
arf1 += ((Mean_TSurfaceElementOne[0][temp] - Taim[temp]) * JacobianMatrix[temp][column]) * Delta_H_Init[column];
if (iter > StaticIter)
{
arf1 += (lamda[iter][temp + M] * JacobinTZgradient[temp][column] * Delta_H_Init[column]);
arf1 += (lamda[iter][temp + M + CoolSection] * (-1)*JacobinTZgradient[temp][column] * Delta_H_Init[column]);
}
arf2 += JacobianMatrix[temp][column] * Delta_H_Init[column] * JacobianMatrix[temp][column] * Delta_H_Init[column];
}
//arf1 += (H_Init[temp] - H_Init_Final[temp])*Delta_H_Init[temp];
//if (iter > StaticIter)
{
arf1 += lamda[iter][0] * JacobianG0[temp] * Delta_H_Init[temp];//1
arf1 += lamda[iter][1] * JacobianG1[temp] * Delta_H_Init[temp];//2
arf1 += lamda[iter][2] * JacobianG2[temp] * Delta_H_Init[temp];//2
}
}
step = -arf1 / ((arf2)+0.001);// the 0.001 term keeps the denominator away from zero
testArray[iter] = step;
printf("step=%f\n", step);
// Update the heat-transfer coefficients of the cooling sections
for (int temp = 0; temp < CoolSection; temp++)
{
H_Init_Final[temp] = H_Init[temp];
H_Init[temp + MoldSection] += step *Delta_H_Init[temp];
//printf(" h=%f", H_Init[temp + MoldSection]);
}
float *Q_water = relationshiphandQ(H_Init, Q_air);
/*for (int temp = 0; temp < CoolSection; temp++) {
printf(" Q_water=%f", Q_water[temp]*1000/60);
}*/
if (iter >= StaticIter)
{
for (int j = 0; j < N; j++)
{
lamda[iter + 1][j] = lamda[iter][j] + c[iter] * g[j];// Lagrange multiplier update
if (lamda[iter + 1][j] < 0)
lamda[iter + 1][j] = 0;// keep the multipliers nonnegative
if (lamda[iter + 1][j] > 100)
lamda[iter][j] /= lamda[iter][j];
}
}
}
// Apply the updated coefficients and advance the temperature field by one time step
for (int temp = 0; temp < Section; temp++)
{
H_Init_Temp[temp] = H_Init[temp];
}
HANDLE_ERROR(hipMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), hipMemcpyHostToDevice));
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
HANDLE_ERROR(hipMemcpy(T_Result, dev_T_Last, nx * ny * nz* sizeof(float), hipMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 0); // calculation the mean surface temperature of steel billets in every cooling sections
for (int temp = 0; temp < CoolSection;temp++)
fitness[t / Num_Iter] += (Mean_TSurface[temp+MoldSection]-Taim[temp]);
if (t % (10 * Num_Iter) == 0)
{
// Solidified-shell thickness estimate (the commented block below is an earlier per-point search)
/*int thickness = 0;
for (; thickness < nx / 2; thickness++) {
float *Mean_Thickness = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, thickness);
if (Mean_Thickness[MoldSection] > Ts)
break;
}*/
//printf("thickness=%d\n", thickness);
ThickAll = calculateThickness(T_Result, nx, ny, nz, dy, ccml, Ts, 250);//0.25m
cout << endl<<" ThickAll= " << endl;
for (int temp = 0; temp < CoolSection; temp++)
cout << ThickAll[temp + MoldSection] << ", ";
cout << " time_step = " << t << ", " << "simulation time = " << t * tao;
cout << endl << "TSurface = " << endl;
for (int temp = 0; temp < CoolSection; temp++)
cout << Mean_TSurface[temp + MoldSection] << ", ";
cout << endl << "TSurface - Taim = " << endl;
for (int temp = 0; temp < CoolSection; temp++)
{
cout << (Mean_TSurface[temp + MoldSection] - Taim[temp]) << ", ";
SurfaceError[t / (10 * Num_Iter)][temp] = (Mean_TSurface[temp + MoldSection] - Taim[temp]);
}
cout << endl;
}
}
ofstream fout;
fout.open("F:\\data_zf\\HighTcastGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
for (int i = 0; i < nx; i++)
{
for (int m = 0; m < nz; m++)
fout << T_Result[nx * nz * j + i * nz + m] << ", ";
fout << endl;
}
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastSurfaceGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastSurfaceGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
fout << T_Result[nx * nz * j + 0 * nz + int((nx - 1) / 2)] << ", ";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastCenterGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastCenterGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
fout << T_Result[nx * nz * j + int((nx - 1) / 2) * nz + int((nx - 1) / 2)] << ", ";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastSurfaceErrorGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastSurfaceErrorGPUMPC3D2block3threads is not open" << endl;
else
{
for (int i = 0; i < TestIter / 10+1; i++)
{
for (int j = 0; j < CoolSection; j++)
fout << SurfaceError[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastThicknessGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastThicknessGPUMPC3D2block3threads is not open" << endl;
else
{
for (int i = 0; i < CoolSection; i++) {
fout << ThickAll[i + MoldSection] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastlamda.txt");
if (!fout)
cout << "HighTcastlamda is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < N; j++)
fout << lamda[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastgtest.txt");
if (!fout)
cout << "HighTcastgtest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < N; j++)
fout << gtest[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcasthtest.txt");
if (!fout)
cout << "HighTcasthtest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < Section; j++)
fout << htest[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastfitnesstest.txt");
if (!fout)
cout << "HighTcastfitnesstest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << fitness[i] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastc.txt");
if (!fout)
cout << "HighTcastc is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << c[i] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcaststep.txt");
if (!fout)
cout << "HighTcaststep is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << testArray[i] << ",";
fout << endl;
}
}
fout.close();
// Check for any errors launching the kernel
HANDLE_ERROR(hipGetLastError());
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
Error:
free(T_Init);
hipFree(dev_T_New);
hipFree(dev_T_Last);
hipFree(dev_ccml);
hipFree(dev_H_Init);
free(JacobianMatrix);
free(JacobianG0);
free(JacobianG1);
free(JacobianG2);
free(JacobinTZgradient);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
__device__ void Physicial_Parameters(float T, float *pho, float *Ce, float *lamd)
{
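// Temperature-dependent material properties: below the solidus Ts the solid values are used, above the
// liquidus Tl the liquid values; in the mushy zone the properties are mixed linearly and the latent heat
// L is folded into an effective specific heat via L / (Tl - Ts).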
float Ts = 1462.0, Tl = 1518.0, lamds = 30, lamdl = 50, phos = 7000, phol = 7500, ce = 540.0, L = 265600.0, fs = 0.0;
if (T<Ts)
{
fs = 0;
*pho = phos;
*lamd = lamds;
*Ce = ce;
}
if (T >= Ts&&T <= Tl)
{
fs = (T - Ts) / (Tl - Ts);
*pho = fs*phos + (1 - fs)*phol;
*lamd = fs*lamds + (1 - fs)*lamdl;
*Ce = ce + L / (Tl - Ts);
}
if (T>Tl)
{
fs = 1;
*pho = phol;
*lamd = lamdl;
*Ce = ce;
}
}
__device__ float Boundary_Condition(int j, float dy, float *ccml_zone, float *H_Init)
{
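// Return the heat-transfer coefficient of the cooling zone that contains the axial position y = j*dy.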
float YLabel, h = 0.0;
YLabel = j*dy;
for (int i = 0; i < Section; i++)
{
if (YLabel >= *(ccml_zone + i) && YLabel <= *(ccml_zone + i + 1))
h = *(H_Init + i);
}
return h;
}
float* Calculation_MeanTemperature(int nx, int ny, int nz, float dy, float *ccml, float *T, float num)
{
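// Mean temperature of each cooling section along the strand, sampled at lateral offset num
// (num = 0 gives the surface line, nx/2 the centerline).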
float y;
int count = 0;
int i = 0;
float* Mean_Temperature;
Mean_Temperature = new float[Section];
for (int i = 0; i < Section; i++)
{
Mean_Temperature[i] = 0.0;
for (int j = 0; j < ny - num; j++)
{
y = j * dy;
if (y > *(ccml + i) && y <= *(ccml + i + 1))
{
Mean_Temperature[i] = Mean_Temperature[i] + T[nx * nz * j + (int)(num * nz) + int((nx - 1) / 2)];
count++;
}
}
Mean_Temperature[i] = Mean_Temperature[i] / float(count);
count = 0;
}
return Mean_Temperature;
}
float stop_criterion() {
float norm_g = 0.0;
for (int i = 0; i <= N - 1; i++)
norm_g = norm_g + g[i] * g[i];
norm_g = sqrt(norm_g);
//printf("norm_g=%f\n", norm_g);
return(norm_g);
}
float update_c(float norm_g[], float c0,int iter) {//Luh20
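// Penalty-coefficient update: c scales with the ratio of the initial to the current constraint norm and is
// damped by the step-size sequence alfa (kept at c0 for the first StaticIter iterations).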
float dM = 2.0, r = 0.5, p = 0.0, c = 10;
if (iter > 0)
{
p = 1.0 - 1.0 / pow(iter, r);//p67
alfa[iter - 1] = 1.0 - 1.0 / (dM*pow(iter, p));//iter67
}
if (iter <= StaticIter)
return c0;
else
{
/*for(int i=0;i<iter;i++)
printf(" alfa=%f",alfa[i]);
printf("\n");*/
c = c0*norm_g[0] / norm_g[iter - 1];//c20
//printf("norm_g[0]=%f\n", norm_g[0]);
//printf("norm_g[iter-1]=%f\n", norm_g[iter-1]);
for (int i = 0; i <= iter - 1; i++)
c = c*alfa[i];//c20
}
return(c);
}
float *relationshiphandQ(float *h_Init, float* Q_air)
{
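// Invert the empirical spray correlation h = hr + hx * qw^rw * qa^ra (q denotes flow per spray area Sl*Sw)
// to recover each cooling zone's water flow Q_water from its heat-transfer coefficient.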
float hx[CoolSection] = { 56.5,40.2,40.2,40.2,40.2,40.2,40.2,40.2 };
float rw[CoolSection] = { 0.845,0.568,0.568,0.568,0.568,0.568,0.568,0.568 };
float ra[CoolSection] = { 0.2,0.1902,0.1902,0.1902,0.1902,0.1902,0.1902,0.1902 };
float hr[CoolSection] = { 0.15,0.082,0.082,0.082,0.082,0.082,0.082,0.082 };
float Sw[CoolSection] = { 1.8,3.86,1.8,1.8, 1.8, 1.8, 1.8, 1.8 };
float Sl[CoolSection] = { 0.5,0.8,2.5,1.8,4.0,3.5,6.0,8.9 };
float *Q_water;
Q_water = new float[CoolSection];
for (int i = 0; i < CoolSection; i++)
{
Q_water[i] = pow((h_Init[i+MoldSection] - hr[i]) / hx[i] / pow(Q_air[i] / (Sl[i] * Sw[i]), ra[i]), 1 / rw[i])*(Sl[i] * Sw[i]);
}
return Q_water;
}
float *calculateThickness(float *T_result, int nx, int ny, int nz, float dy, float *ccml,float Ts,float thick) {
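// For each section, march inward from the surface until the section-mean temperature first exceeds the
// solidus Ts, then convert that index into a shell thickness (thick is the slab thickness in the caller's units).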
float y;
int count = 0;
float *Mean_Temperature;
Mean_Temperature = new float[Section];
int *thickness;
thickness = new int[Section];
float *res_thickness;
res_thickness = new float[Section];
for (int i = 0; i < Section; i++)
{
for (thickness[i] = 0; thickness[i] <nx; thickness[i]++){
Mean_Temperature[i] = 0.0;
for (int j = 0; j < ny; j++)
{
y = j * dy;
if (y > *(ccml + i) && y <= *(ccml + i + 1))
{
Mean_Temperature[i] = Mean_Temperature[i] + T_result[nx * nz * j + thickness[i] * nz + int((nx - 1) / 2)];
count++;
}
}
Mean_Temperature[i] = Mean_Temperature[i] / float(count);
count = 0;
if (Mean_Temperature[i] > Ts)
break;
}
res_thickness[i]=thickness[i] * thick / nx;
}
return res_thickness;
}
2154065b14e0abd0be6c7e2fcab88278d4d71f70.cu | // In the MPC model, change h to Q; also add the ability to modify the casting temperature
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <fstream>
#include "book.h"
#include "gridcheck.h"
using namespace std;
# define Section 12 // number of cooling sections
# define CoolSection 8
# define MoldSection 4
# define StaticIter 50
# define M 3
# define N M+2*CoolSection
# define TestIter 500
# define limit 501//limit>=tnpts/num_iter
float ccml[Section + 1] = { 0.0,0.2,0.4,0.6,0.8,1.0925,2.27,4.29,5.831,9.6065,13.6090,19.87014,28.599 }; // The cooling sections
//float H_Init[Section] = { 1380,1170,980,800,1223.16,735.05,424.32,392.83,328.94,281.64,246.16,160.96 }; // The heat transfer coefficients in the cooling sections
float H_Init[Section] = { 1400,1200,1000,800,1200,750,400,400,350,300,250,150 };
//float H_Init[Section] = { 1500,1300,1100,900,1300,850,500,500,450,400,350,250 };
//float H_Init_Temp[Section] = { 1380,1170,980,800,1223.16,735.05,424.32,392.83,328.94,281.64,246.16,160.96 }; // The heat transfer coefficients in the cooling sections
float H_Init_Temp[Section] = { 0 };
float H_Init_Final[Section] = { 1380 };
float Q_air[CoolSection] = { 200,1500,850,650,1000,850,400,480 };
float Taim[CoolSection] = { 966.149841, 925.864746, 952.322083, 932.175537, 914.607117, 890.494263, 870.804443, 890.595825 };
float delta_z[Section] = {2.7,2.7,1.8,1.8,1.8,1.8,1.8,0.9};
float *Calculation_MeanTemperature(int nx, int ny, int nz, float dy, float *ccml, float *T, float num);
float *calculateThickness(float *T_result, int nx, int ny, int nz, float dy, float *ccml, float Ts, float thick);
cudaError_t addWithCuda(float *T_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, int tnpts, int num_blocks, int num_threadsx, int num_threadsy);
__device__ void Physicial_Parameters(float T, float *pho, float *Ce, float *lamd);
__device__ float Boundary_Condition(int j, float dx, float *ccml_zone, float *H_Init);
float *relationshiphandQ(float *h_Init, float* Q_air);
float stop_criterion();
float update_c(float[], float c0,int iter);
void update_lamda(float[],int iter,float[]);
float alfa[limit] = { 1.0 };
float g[N] = { 0 };
float testArray[TestIter] = { 0 };
__global__ void addKernel(float *T_New, float *T_Last, float *ccml, float *H_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, bool disout,float Vcast)
{
int i = threadIdx.x;
int m = threadIdx.y;
int j = blockIdx.x;
int idx = j * nx * nz + m * nx + i;
int ND = nx * nz;
int D = nx;
float pho, Ce, lamd; // physical parameters pho represents desity, Ce is specific heat and lamd is thermal conductivity
float a, T_Up, T_Down, T_Right, T_Left, T_Forw, T_Back, h = 100.0, Tw = 30.0, T_Cast = 1558.0; //Vcast = -0.02
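// One thread per (i, m) cross-section node and one block per axial slice j; when disout is true the update
// reads T_Last and writes T_New, otherwise the two buffers swap roles (ping-pong time stepping).
// The branches below apply the interior stencil and ghost-node convective conditions (h, Tw) on every
// face, edge and corner of the 3D grid; the inlet slice j == 0 is held at the casting temperature.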
if (disout) {
Physicial_Parameters(T_Last[idx], &pho, &Ce, &lamd);
a = (lamd) / (pho*Ce);
h = Boundary_Condition(j, dy, ccml, H_Init);
if (j == 0) //1
{
T_New[idx] = T_Cast;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m != 0 && m != (nz - 1)) //10
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = (a*tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ (a*tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //11
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //12
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //13
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //14
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == 0) //15
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == (nz - 1)) //16
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == 0) //17
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == (nz - 1)) //18
{
//T_New[idx] = 1550.0;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx - ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //19
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //20
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == 0) //21
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == 0) //22
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx + D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == (nz - 1)) //23
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == (nz - 1)) //24
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx - D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //25
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx + 1] - 2 * dx * h * (T_Last[idx] - Tw) / lamd;
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //26
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx - 1] - 2 * dx * h * (T_Last[idx] - Tw) / lamd;
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else //27
{
//T_New[idx] = T_Cast;
T_Up = T_Last[idx + 1];
T_Down = T_Last[idx - 1];
T_Right = T_Last[idx + ND];
T_Left = T_Last[idx - ND];
T_Forw = T_Last[idx + D];
T_Back = T_Last[idx - D];
T_New[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_Last[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
}
else
{
Physicial_Parameters(T_New[idx], &pho, &Ce, &lamd);
a = (lamd) / (pho*Ce);
h = Boundary_Condition(j, dy, ccml, H_Init);
if (j == 0) //1
{
T_Last[idx] = T_Cast;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m != 0 && m != (nz - 1)) //10
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //11
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //12
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //13
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //14
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == 0) //15
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == 0 && m == (nz - 1)) //16
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == 0) //17
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j == (ny - 1) && i == (nx - 1) && m == (nz - 1)) //18
{
//T_Last[idx] = 1550.0;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx - ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == 0) //19
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i != 0 && i != (nx - 1) && m == (nz - 1)) //20
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D] - 2 * dz * h * (T_Last[idx] - Tw) / lamd;
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == 0) //21
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == 0) //22
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx + D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m == (nz - 1)) //23
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m == (nz - 1)) //24
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx - D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == 0 && m != 0 && m != (nz - 1)) //25
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx + 1] - 2 * dx * h * (T_New[idx] - Tw) / lamd;
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else if (j != 0 && j != (ny - 1) && i == (nx - 1) && m != 0 && m != (nz - 1)) //26
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx - 1] - 2 * dx * h * (T_New[idx] - Tw) / lamd;
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
else //27
{
//T_Last[idx] = T_Cast;
T_Up = T_New[idx + 1];
T_Down = T_New[idx - 1];
T_Right = T_New[idx + ND];
T_Left = T_New[idx - ND];
T_Forw = T_New[idx + D];
T_Back = T_New[idx - D];
T_Last[idx] = a*(tao / (dx*dx))*T_Up + a*(tao / (dx*dx))*T_Down + ((1 - 2 * a*tao / (dx*dx) - 2 * a*tao / (dy*dy) - 2 * a*tao / (dz*dz) + tao*Vcast / dy))*T_New[idx]
+ a*(tao / (dy*dy))*T_Right + (a*tao / (dy*dy) - tao*Vcast / dy)*T_Left + (a*tao / (dz*dz))*T_Forw + (a*tao / (dz*dz))*T_Back;
}
}
}
int main()
{
	const int nx = 21, ny = 3000, nz = 21; // nx, ny and nz are the numbers of grid points in the x, y and z directions.
int num_blocks = 1, num_threadsx = 1, num_threadsy = 1;// num_threadsz = 1; // block number(1D) thread number in x and y dimension(2D)
int tnpts = 10001; // time step
	float Lx = 0.25, Ly = 28.599, Lz = 0.25, t_final = 2000.0, dx, dy, dz, tao; // Lx and Lz are the thickness and width of the steel billet, Ly its length; t_final is the total simulated time
float T_Cast = 1758.0;//T_Cast = 1558.0,
float *T_Init;
num_threadsx = nx;
num_threadsy = nz;
num_blocks = ny;
T_Init = (float*)calloc(nx*ny*nz,sizeof(float)); // Initial condition
for (int m = 0; m < nz; m++)
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
T_Init[nx * ny * m + j * nx + i] = T_Cast; // give the initial condition
dx = Lx / (nx - 1); // the grid size x
dy = Ly / (ny - 1); // the grid size y
dz = Lz / (nz - 1); // the grid size z
tao = t_final / (tnpts - 1); // the time step size
//gridcheck(dx, dy, tao);
cout << "Casting Temperature " << T_Cast << endl;
cout << "The length of steel billets(m) " << Ly << endl;
cout << "The width of steel billets(m) " << Lz << endl;
cout << "The thick of steel billets(m) " << Lx << endl;
cout << "dx(m) " << dx << ", ";
cout << "dy(m) " << dy << ", ";
cout << "dz(m) " << dz << ", ";
cout << "tao(s) " << tao << ", ";
cout << "simulation time(s) " << t_final << endl;
//clock_t timestart = clock();
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
cudaError_t cudaStatus = addWithCuda(T_Init, dx, dy, dz, tao, nx, ny, nz, tnpts, num_blocks, num_threadsx, num_threadsy);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsetime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsetime, start, stop));
cout << "running time =" << (elapsetime);
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
/*clock_t timeend = clock();
cout << "running time = " << (timeend - timestart);*/
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(float *T_Init, float dx, float dy, float dz, float tao, int nx, int ny, int nz, int tnpts, int num_blocks, int num_threadsx, int num_threadsy)
{
float *dev_T_New, *dev_T_Last, *dev_ccml, *dev_H_Init; // the point on GPU
float *T_Result, *Delta_H_Init, *T_HoldLast,*ThickAll, **Mean_TSurfaceElement, **Mean_TSurfaceElementOne;
float *Point_TSurfaceElement, *Point_TSurfaceElementOne, **Mean_TCenterElement, **Mean_TCenterElementOne;
float **JacobianMatrix, *JacobianG0, *JacobianG1, *JacobianG2, *TZ_gradient,*partionQ;
float **JacobinTZgradient, **TZ_gradientElement, **TZ_gradientElementOne;
float dh = 10.0,dQ=1.0, arf1, arf2, step = -0.0001,T_bmax=1100,Ts=1462,Tl= 1518.0,Tu=-100,Td=200;
float Vcast = -0.02;
const int Num_Iter = 10, PrintLabel = 0;// The result can be obtained by every Num_Iter time step
volatile bool dstOut = true;
	// constraint-function bookkeeping: penalty parameter c, multipliers lamda, and iteration logs
float c[limit] = {10};
float norm_g[limit] = { 0 };
float eps = 0.0001,c0 = 10;
float lamda[limit][N] = { 1 };
float gtest[limit][N] = { 0 };
float htest[limit][Section] = { 0 };
float fitness[limit] = { 0 };
T_Result = (float *)calloc(nx * ny * nz, sizeof(float)); // The temperature of steel billets
Delta_H_Init = (float*)calloc(CoolSection, sizeof(float));
T_HoldLast = (float*)calloc(nz * ny * nx, sizeof(float));
Point_TSurfaceElement = (float*)calloc(CoolSection, sizeof(float));
Point_TSurfaceElementOne = (float*)calloc(CoolSection, sizeof(float));
JacobianG0 = (float*)calloc(CoolSection, sizeof(float));
JacobianG1 = (float*)calloc(CoolSection, sizeof(float));
JacobianG2 = (float*)calloc(CoolSection, sizeof(float));
TZ_gradient = (float*)calloc(CoolSection, sizeof(float));
partionQ = (float*)calloc(CoolSection, sizeof(float));
ThickAll = (float*)calloc(Section, sizeof(float));
	JacobianMatrix = (float**)calloc(CoolSection, sizeof(float*)); // pointer rows must be allocated with sizeof(float*), not sizeof(float)
	for (int i = 0; i < CoolSection; i++)
		JacobianMatrix[i] = (float*)calloc(CoolSection, sizeof(float));
	Mean_TSurfaceElement = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		Mean_TSurfaceElement[i] = (float*)calloc(CoolSection, sizeof(float));
	Mean_TSurfaceElementOne = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		Mean_TSurfaceElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
	Mean_TCenterElement = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		Mean_TCenterElement[i] = (float*)calloc(CoolSection, sizeof(float));
	Mean_TCenterElementOne = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		Mean_TCenterElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
	JacobinTZgradient = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		JacobinTZgradient[i] = (float*)calloc(CoolSection, sizeof(float));
	TZ_gradientElement = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		TZ_gradientElement[i] = (float*)calloc(CoolSection, sizeof(float));
	TZ_gradientElementOne = (float**)calloc(CoolSection, sizeof(float*));
	for (int i = 0; i < CoolSection; i++)
		TZ_gradientElementOne[i] = (float*)calloc(CoolSection, sizeof(float));
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
HANDLE_ERROR(cudaSetDevice(0));
HANDLE_ERROR(cudaMalloc((void**)&dev_T_New, nx * ny * nz * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_T_Last, nx * ny * nz * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_ccml, (Section + 1) * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_H_Init, Section * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(dev_T_Last, T_Init, nx * ny * nz * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_ccml, ccml, (Section + 1) * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_H_Init, H_Init, Section * sizeof(float), cudaMemcpyHostToDevice));
dim3 threadsPerBlock(num_threadsx, num_threadsy);
float SurfaceError[TestIter / 10+1][CoolSection];
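	// Main time loop. Every Num_Iter steps the block below acts as the controller:
	// it saves the temperature field, runs one baseline and CoolSection perturbed
	// predictions (each secondary-cooling heat-transfer coefficient bumped by dh) to
	// build numerical Jacobians, assembles the constraint vector g and the augmented-
	// Lagrangian gradient, takes a gradient step on H_Init, and updates the
	// multipliers lamda. On every step the plant itself is then advanced once with
	// the current coefficients.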
for (int t = 0; t < TestIter*10+1; t++)
{
		// casting speed kept constant (the commented block below would vary Vcast over time)
//if(t / Num_Iter >= 2* StaticIter&&t / Num_Iter < 4 * StaticIter)//100-200
// Vcast = -0.017;
//else if(t / Num_Iter >= 4 * StaticIter&&t / Num_Iter < 6 * StaticIter)//200-300
// Vcast = -0.02;
//else if (t / Num_Iter >= 6 * StaticIter&&t / Num_Iter < 8 * StaticIter)//300-400
// Vcast = -0.023;
		//else//after step 400
// Vcast = -0.02;
if (t % Num_Iter == 0)
{
int iter = t / Num_Iter;
HANDLE_ERROR(cudaMemcpy(T_HoldLast, dev_T_Last, nx * ny * nz * sizeof(float), cudaMemcpyDeviceToHost));
for (int m = 0; m < CoolSection + 1; m++)
{
if (m == CoolSection)
{
for (int temp = 0; temp < Section; temp++) {
H_Init_Temp[temp] = H_Init[temp];
}
HANDLE_ERROR(cudaMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), cudaMemcpyHostToDevice));
for (int PNum = 0; PNum < Num_Iter; PNum++)
{
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
}
HANDLE_ERROR(cudaMemcpy(T_Result, dev_T_New, nx * ny * nz * sizeof(float), cudaMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,0); // calculation the mean surface temperature of steel billets in every cooling sections
					float* Mean_TPoint = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 8.0 / 250 * nx);// temperature at a point 8 mm below the surface
float Point_TSurface = Mean_TPoint[MoldSection];
					float *Mean_TCenter = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,nx/2);// centerline temperature
for (int temp = 0; temp < CoolSection; temp++) {
Point_TSurfaceElementOne[temp] = Point_TSurface;
if (iter >= StaticIter)
{
if (temp < CoolSection - 1)
TZ_gradient[temp] = (Mean_TSurface[temp + 1 + MoldSection] - Mean_TSurface[temp + MoldSection]) / delta_z[temp];
else
//TZ_gradient[temp] = -(T_Result[nx*nz*(ny - 1) + 0 * nz + (int)(nx - 1)] - Mean_TSurface[temp + MoldSection]) / delta_z[temp];
TZ_gradient[temp] = 100;
//printf("TZ_gradient=%f ", TZ_gradient[temp]);
}
for (int column = 0; column < CoolSection; column++)
{
Mean_TSurfaceElementOne[temp][column] = Mean_TSurface[column + MoldSection];
Mean_TCenterElementOne[temp][column] = Mean_TCenter[column + MoldSection];
TZ_gradientElementOne[temp][column] = TZ_gradient[column + MoldSection];
}
}
//printf("\n");
}
else
{
for (int temp = 0; temp < Section; temp++)
H_Init_Temp[temp] = H_Init[temp];
//printf(" h=%f", H_Init_Temp[m]);
H_Init_Temp[m + MoldSection] = H_Init[m + MoldSection] + dh;
HANDLE_ERROR(cudaMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), cudaMemcpyHostToDevice));
					for (int PNum = 0; PNum < Num_Iter; PNum++)// prediction horizon length
{
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
}
HANDLE_ERROR(cudaMemcpy(T_Result, dev_T_New, nx * ny * nz * sizeof(float), cudaMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,0); // calculation the mean surface temperature of steel billets in every cooling sections
float* Mean_TPoint = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 8.0 / 250 * nx);
Point_TSurfaceElement[m] = Mean_TPoint[m];
float *Mean_TCenter = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result,nx/2);
if (iter >= 2 * StaticIter)
{
if (m < CoolSection - 1)
TZ_gradient[m] = (Mean_TSurface[m + 1 + MoldSection] - Mean_TSurface[m + MoldSection]) / delta_z[m];
else
//TZ_gradient[m] = -(T_Result[nx*nz*(ny - 1) + 0 * nz + (int)(nx - 1)] - Mean_TSurface[m + MoldSection]) / delta_z[m];
TZ_gradient[m] = 150;
}
for (int column = 0; column < CoolSection; column++)
{
						Mean_TSurfaceElement[m][column] = Mean_TSurface[column + MoldSection];// offset so the index matches the secondary cooling sections
Mean_TCenterElement[m][column] = Mean_TCenter[column + MoldSection];
TZ_gradientElement[m][column] = TZ_gradient[column + MoldSection];
}
}
/*for (int i = 0; i < CoolSection; i++) {
printf("TZ_gradient=%f ",TZ_gradient[i]);
}
printf("\n");*/
				// add constraints (g[0]: mean surface temperature of the first secondary cooling section)
if (iter >= StaticIter)
{
g[0] = Mean_TSurfaceElement[0][0] - T_bmax;
					g[2] = Mean_TCenterElement[MoldSection+1][MoldSection+1] - Tl;// liquid-core length; this way of measuring it is questionable
					g[1] = Point_TSurfaceElement[MoldSection] - Ts;// this one seems correct
/*printf("g[0]=%f\n", g[0]);
printf("g[1]=%f\n", g[1]);
printf("g[2]=%f\n", g[2]);*/
for (int i = M; i < N; i++) {
if (i < M + CoolSection)
g[i] = Tu - TZ_gradient[i-M];
else
g[i] = TZ_gradient[i - M - CoolSection]-Td;
}
}
for (int temp = 0; temp < M; temp++)
fitness[iter] += lamda[iter][temp] * g[temp];
for (int i = 0; i < N; i++)
{
if (iter < StaticIter)
gtest[iter][i] = 0;
else
gtest[iter][i] = g[i];
}
HANDLE_ERROR(cudaMemcpy(dev_T_Last, T_HoldLast, nx * ny * nz * sizeof(float), cudaMemcpyHostToDevice));
}
printf("iter=%d\n", iter);
printf("g[0]=%f\n", g[0]);
			// check whether to exit the optimization loop
if (iter >= StaticIter)
{
norm_g[iter] = stop_criterion();
norm_g[0] = norm_g[StaticIter];
}
			if (norm_g[iter - 1]<eps&&iter>2*StaticIter)// stopping criterion satisfied
break;
			// update the multipliers (penalty coefficient c)
c[iter] = update_c(norm_g, c0, iter-StaticIter);
printf("c=%f\n", c[iter]);
if (iter <= StaticIter)
for (int j = 0; j < N; j++)
lamda[iter][j] = 1;
/*for (int j = 0; j < N; j++) {
printf("lamda[i]=%f\n", lamda[iter][j]);
printf("g[i]=%f\n", g[j]);
}*/
for (int j = 0; j < Section; j++) {
htest[iter][j] = H_Init_Temp[j];
}
			// gradient of the objective function
for (int row = 0; row < CoolSection; row++)
{
for (int column = 0; column < CoolSection; column++)
{
					JacobianMatrix[row][column] = (Mean_TSurfaceElement[row][column] - Mean_TSurfaceElementOne[row][column]) / dh; // composite derivative, part 1
JacobinTZgradient[row][column] = (TZ_gradientElement[row][column] - TZ_gradientElementOne[row][column]) / dh;
if (row == 0)
{
if (iter > StaticIter)
{
JacobianG0[column] = (Mean_TSurfaceElement[0][column] - Mean_TSurfaceElementOne[0][column]) / dh;
JacobianG1[column] = (Point_TSurfaceElement[column]- Point_TSurfaceElementOne[column]) / dh;
JacobianG2[column] = (Mean_TCenterElement[4][column] - Mean_TCenterElementOne[4][column]) / dh;
}
}
}
}
for (int temp = 0; temp < CoolSection; temp++) {
Delta_H_Init[temp] = 0.0;
for (int column = 0; column < CoolSection; column++)
{
					Delta_H_Init[temp] += (Mean_TSurfaceElementOne[temp][column] - Taim[column]) * JacobianMatrix[temp][column];// composite derivative, part 2
if (iter > StaticIter)
{
Delta_H_Init[temp] += lamda[iter][temp + M] * JacobinTZgradient[temp][column];
Delta_H_Init[temp] +=(-1)* lamda[iter][temp + M + CoolSection] * JacobinTZgradient[temp][column];
}
}
				Delta_H_Init[temp] += H_Init[temp] - H_Init_Final[temp];// incremental term from the change in h
fitness[iter] += lamda[iter][temp + M] * g[temp + M];
fitness[iter] += lamda[iter][temp + M + CoolSection] * g[temp + M + CoolSection];
fitness[iter]+= H_Init[temp] - H_Init_Final[temp];
if (iter > StaticIter)
{
					Delta_H_Init[temp] += lamda[iter][0] * JacobianG0[temp];// derivative of the augmented term, part 1
					Delta_H_Init[temp] += lamda[iter][1] * JacobianG1[temp];// derivative of the augmented term, part 2
					Delta_H_Init[temp] += lamda[iter][2] * JacobianG2[temp];// derivative of the augmented term, part 3
}
//printf(" Delta_H_Init=%f\n", Delta_H_Init[temp]);
}
printf("\n");
			// step size adapted to the objective function
arf1 = 0.0, arf2 = 0.0;
for (int temp = 0; temp < CoolSection; temp++)
{
for (int column = 0; column < CoolSection; column++)
{
arf1 += ((Mean_TSurfaceElementOne[0][temp] - Taim[temp]) * JacobianMatrix[temp][column]) * Delta_H_Init[column];
if (iter > StaticIter)
{
arf1 += (lamda[iter][temp + M] * JacobinTZgradient[temp][column] * Delta_H_Init[column]);
arf1 += (lamda[iter][temp + M + CoolSection] * (-1)*JacobinTZgradient[temp][column] * Delta_H_Init[column]);
}
arf2 += JacobianMatrix[temp][column] * Delta_H_Init[column] * JacobianMatrix[temp][column] * Delta_H_Init[column];
}
//arf1 += (H_Init[temp] - H_Init_Final[temp])*Delta_H_Init[temp];
//if (iter > StaticIter)
{
				arf1 += lamda[iter][0] * JacobianG0[temp] * Delta_H_Init[temp];// augmented form, part 1
				arf1 += lamda[iter][1] * JacobianG1[temp] * Delta_H_Init[temp];// augmented form, part 2
				arf1 += lamda[iter][2] * JacobianG2[temp] * Delta_H_Init[temp];// augmented form, part 3
}
}
			step = -arf1 / ((arf2)+0.001);// step-size formula follows the objective; the +0.001 guards against division by zero
testArray[iter] = step;
printf("step=%f\n", step);
			// update step: apply the gradient to the heat-transfer coefficients
for (int temp = 0; temp < CoolSection; temp++)
{
H_Init_Final[temp] = H_Init[temp];
H_Init[temp + MoldSection] += step *Delta_H_Init[temp];
//printf(" h=%f", H_Init[temp + MoldSection]);
}
float *Q_water = relationshiphandQ(H_Init, Q_air);
/*for (int temp = 0; temp < CoolSection; temp++) {
printf(" Q_water=%f", Q_water[temp]*1000/60);
}*/
if (iter >= StaticIter)
{
for (int j = 0; j < N; j++)
{
					lamda[iter + 1][j] = lamda[iter][j] + c[iter] * g[j];// multiplier (lamda) update
					if (lamda[iter + 1][j] < 0)
						lamda[iter + 1][j] = 0;// keep the multipliers non-negative
if (lamda[iter + 1][j] > 100)
lamda[iter][j] /= lamda[iter][j];
}
}
}
		// advance the actual continuous-casting simulation with the updated coefficients
for (int temp = 0; temp < Section; temp++)
{
H_Init_Temp[temp] = H_Init[temp];
}
HANDLE_ERROR(cudaMemcpy(dev_H_Init, H_Init_Temp, Section * sizeof(float), cudaMemcpyHostToDevice));
addKernel << <num_blocks, threadsPerBlock >> >(dev_T_New, dev_T_Last, dev_ccml, dev_H_Init, dx, dy, dz, tao, nx, ny, nz, dstOut,Vcast);
dstOut = !dstOut;
HANDLE_ERROR(cudaMemcpy(T_Result, dev_T_Last, nx * ny * nz* sizeof(float), cudaMemcpyDeviceToHost));
float* Mean_TSurface = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, 0); // calculation the mean surface temperature of steel billets in every cooling sections
for (int temp = 0; temp < CoolSection;temp++)
fitness[t / Num_Iter] += (Mean_TSurface[temp+MoldSection]-Taim[temp]);
if (t % (10 * Num_Iter) == 0)
{
			// mold constraint (the shell-thickness scan below is commented out)
/*int thickness = 0;
for (; thickness < nx / 2; thickness++) {
float *Mean_Thickness = Calculation_MeanTemperature(nx, ny, nz, dy, ccml, T_Result, thickness);
if (Mean_Thickness[MoldSection] > Ts)
break;
}*/
//printf("thickness=%d\n", thickness);
			ThickAll = calculateThickness(T_Result, nx, ny, nz, dy, ccml, Ts, 250);// billet thickness is 0.25 m (250 mm)
cout << endl<<" ThickAll= " << endl;
for (int temp = 0; temp < CoolSection; temp++)
cout << ThickAll[temp + MoldSection] << ", ";
cout << " time_step = " << t << ", " << "simulation time = " << t * tao;
cout << endl << "TSurface = " << endl;
for (int temp = 0; temp < CoolSection; temp++)
cout << Mean_TSurface[temp + MoldSection] << ", ";
cout << endl << "TSurface - Taim = " << endl;
for (int temp = 0; temp < CoolSection; temp++)
{
cout << (Mean_TSurface[temp + MoldSection] - Taim[temp]) << ", ";
SurfaceError[t / (10 * Num_Iter)][temp] = (Mean_TSurface[temp + MoldSection] - Taim[temp]);
}
cout << endl;
}
}
ofstream fout;
fout.open("F:\\data_zf\\HighTcastGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
for (int i = 0; i < nx; i++)
{
for (int m = 0; m < nz; m++)
fout << T_Result[nx * nz * j + i * nz + m] << ", ";
fout << endl;
}
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastSurfaceGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastSurfaceGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
fout << T_Result[nx * nz * j + 0 * nz + int((nx - 1) / 2)] << ", ";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastCenterGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastCenterGPUMPC3D2block3threads is not open" << endl;
else
{
for (int j = 0; j < ny; j++)
{
fout << T_Result[nx * nz * j + int((nx - 1) / 2) * nz + int((nx - 1) / 2)] << ", ";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastSurfaceErrorGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastSurfaceErrorGPUMPC3D2block3threads is not open" << endl;
else
{
for (int i = 0; i < TestIter / 10+1; i++)
{
for (int j = 0; j < CoolSection; j++)
fout << SurfaceError[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastThicknessGPUMPC3D2block3threads.txt");
if (!fout)
cout << "HighTcastThicknessGPUMPC3D2block3threads is not open" << endl;
else
{
for (int i = 0; i < CoolSection; i++) {
fout << ThickAll[i + MoldSection] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastlamda.txt");
if (!fout)
cout << "HighTcastlamda is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < N; j++)
fout << lamda[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastgtest.txt");
if (!fout)
cout << "HighTcastgtest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < N; j++)
fout << gtest[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcasthtest.txt");
if (!fout)
cout << "HighTcasthtest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
for (int j = 0; j < Section; j++)
fout << htest[i][j] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastfitnesstest.txt");
if (!fout)
cout << "HighTcastfitnesstest is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << fitness[i] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcastc.txt");
if (!fout)
cout << "HighTcastc is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << c[i] << ",";
fout << endl;
}
}
fout.close();
fout.open("F:\\data_zf\\HighTcaststep.txt");
if (!fout)
cout << "HighTcaststep is not open" << endl;
else
{
for (int i = 0; i < TestIter; i++)
{
fout << testArray[i] << ",";
fout << endl;
}
}
fout.close();
// Check for any errors launching the kernel
HANDLE_ERROR(cudaGetLastError());
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
Error:
cudaFree(T_Init);
cudaFree(dev_T_New);
cudaFree(dev_T_Last);
cudaFree(dev_ccml);
cudaFree(dev_H_Init);
cudaFree(JacobianMatrix);
cudaFree(JacobianG0);
cudaFree(JacobianG1);
cudaFree(JacobianG2);
cudaFree(JacobinTZgradient);
return cudaStatus;
}
// Temperature-dependent material properties: density, thermal conductivity, and specific heat (latent heat spread over the mushy zone between Ts and Tl).
__device__ void Physicial_Parameters(float T, float *pho, float *Ce, float *lamd)
{
float Ts = 1462.0, Tl = 1518.0, lamds = 30, lamdl = 50, phos = 7000, phol = 7500, ce = 540.0, L = 265600.0, fs = 0.0;
if (T<Ts)
{
fs = 0;
*pho = phos;
*lamd = lamds;
*Ce = ce;
}
if (T >= Ts&&T <= Tl)
{
fs = (T - Ts) / (Tl - Ts);
*pho = fs*phos + (1 - fs)*phol;
*lamd = fs*lamds + (1 - fs)*lamdl;
*Ce = ce + L / (Tl - Ts);
}
if (T>Tl)
{
fs = 1;
*pho = phol;
*lamd = lamdl;
*Ce = ce;
}
}
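// Map the axial position j*dy to its cooling zone and return that zone's
// heat-transfer coefficient.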
__device__ float Boundary_Condition(int j, float dy, float *ccml_zone, float *H_Init)
{
float YLabel, h = 0.0;
YLabel = j*dy;
for (int i = 0; i < Section; i++)
{
if (YLabel >= *(ccml_zone + i) && YLabel <= *(ccml_zone + i + 1))
h = *(H_Init + i);
}
return h;
}
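// Average the temperature over each cooling section at depth index num below the
// billet surface (num = 0 gives the surface, num = nx/2 the centre line).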
float* Calculation_MeanTemperature(int nx, int ny, int nz, float dy, float *ccml, float *T, float num)
{
float y;
int count = 0;
int i = 0;
float* Mean_Temperature;
Mean_Temperature = new float[Section];
for (int i = 0; i < Section; i++)
{
Mean_Temperature[i] = 0.0;
for (int j = 0; j < ny - num; j++)
{
y = j * dy;
if (y > *(ccml + i) && y <= *(ccml + i + 1))
{
Mean_Temperature[i] = Mean_Temperature[i] + T[nx * nz * j + (int)(num * nz) + int((nx - 1) / 2)];
count++;
}
}
Mean_Temperature[i] = Mean_Temperature[i] / float(count);
count = 0;
}
return Mean_Temperature;
}
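// Euclidean norm of the constraint vector g, used as the stopping criterion of the
// multiplier iteration.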
float stop_criterion() {
float norm_g = 0.0;
for (int i = 0; i <= N - 1; i++)
norm_g = norm_g + g[i] * g[i];
norm_g = sqrt(norm_g);
//printf("norm_g=%f\n", norm_g);
return(norm_g);
}
float update_c(float norm_g[], float c0,int iter) {// uses Eq. (20) from Luh's paper
float dM = 2.0, r = 0.5, p = 0.0, c = 10;
if (iter > 0)
{
		p = 1.0 - 1.0 / pow(iter, r);// update formula for p, Eq. (67)
		alfa[iter - 1] = 1.0 - 1.0 / (dM*pow(iter, p));// alpha update, Eq. (67) (should this be indexed by iteration iter?)
}
if (iter <= StaticIter)
return c0;
else
{
/*for(int i=0;i<iter;i++)
printf(" alfa=%f",alfa[i]);
printf("\n");*/
		c = c0*norm_g[0] / norm_g[iter - 1];// first part of the c update, Eq. (20)
//printf("norm_g[0]=%f\n", norm_g[0]);
//printf("norm_g[iter-1]=%f\n", norm_g[iter-1]);
for (int i = 0; i <= iter - 1; i++)
			c = c*alfa[i];// second part of the c update, Eq. (20)
}
return(c);
}
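// Invert the empirical correlation h = hr + hx*(Qw/A)^rw*(Qa/A)^ra (A = Sl*Sw) to
// recover the spray-water flow rate Qw of each cooling zone from its heat-transfer
// coefficient.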
float *relationshiphandQ(float *h_Init, float* Q_air)
{
float hx[CoolSection] = { 56.5,40.2,40.2,40.2,40.2,40.2,40.2,40.2 };
float rw[CoolSection] = { 0.845,0.568,0.568,0.568,0.568,0.568,0.568,0.568 };
float ra[CoolSection] = { 0.2,0.1902,0.1902,0.1902,0.1902,0.1902,0.1902,0.1902 };
float hr[CoolSection] = { 0.15,0.082,0.082,0.082,0.082,0.082,0.082,0.082 };
float Sw[CoolSection] = { 1.8,3.86,1.8,1.8, 1.8, 1.8, 1.8, 1.8 };
float Sl[CoolSection] = { 0.5,0.8,2.5,1.8,4.0,3.5,6.0,8.9 };
float *Q_water;
Q_water = new float[CoolSection];
for (int i = 0; i < CoolSection; i++)
{
Q_water[i] = pow((h_Init[i+MoldSection] - hr[i]) / hx[i] / pow(Q_air[i] / (Sl[i] * Sw[i]), ra[i]), 1 / rw[i])*(Sl[i] * Sw[i]);
}
return Q_water;
}
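// Estimate the solidified shell thickness in each section: scan inward from the
// surface until the section-averaged temperature exceeds the solidus Ts, then
// convert the grid index to a length via the overall billet thickness (thick).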
float *calculateThickness(float *T_result, int nx, int ny, int nz, float dy, float *ccml,float Ts,float thick) {
float y;
int count = 0;
float *Mean_Temperature;
Mean_Temperature = new float[Section];
int *thickness;
thickness = new int[Section];
float *res_thickness;
res_thickness = new float[Section];
for (int i = 0; i < Section; i++)
{
for (thickness[i] = 0; thickness[i] <nx; thickness[i]++){
Mean_Temperature[i] = 0.0;
for (int j = 0; j < ny; j++)
{
y = j * dy;
if (y > *(ccml + i) && y <= *(ccml + i + 1))
{
Mean_Temperature[i] = Mean_Temperature[i] + T_result[nx * nz * j + thickness[i] * nz + int((nx - 1) / 2)];
count++;
}
}
Mean_Temperature[i] = Mean_Temperature[i] / float(count);
count = 0;
if (Mean_Temperature[i] > Ts)
break;
}
res_thickness[i]=thickness[i] * thick / nx;
}
return res_thickness;
}
|
e9e22b7bd3d9118a50eead43e98f29cf3fd99a75.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string.h>
#define DATAFILE "./data.bin"
#define OUTFILE "./snapshot.bin"
//Volume Control Block, which contains volume details
//Including #of blocks, #of free blocks, block size, free block pointers or array
#define SUPERBLOCK_SIZE 4096 //4KB
//File Control Block, which is a Storage Structure consisting of information about a file
#define FCB_SIZE 32 //32 bytes per FCB
#define FCB_ENTRIES 1024
//Total size of available memory
#define STORAGE_SIZE 1085440 //1060KB
#define STORAGE_BLOCK_SIZE 32
#define MAX_FILENAME_SIZE 20 //20 bytes
//max number of files
#define MAX_FILE_NUM 1024
//max size of file
#define MAX_ONE_FILE_SIZE 1024
//max size of file name
#define MAX_FILE_NAME 20
//The maximum size of file memory
#define MAX_FILE_SIZE 1048576 //1024KB
#define BIT_TO_BYTE 8 //Used in the conversion of bit to byte
//the start position of file memory
#define FILE_STORAGE_START (STORAGE_SIZE-MAX_FILE_SIZE)
#define BITS_IN_BYTE 8
#define OP_ERROR -1
#define TRUE 1
#define FALSE 0
//for valid/invalid FCB and valid/free block
#define VALID 1
#define FREE 0
#define INVALID 0
//for bitmap
#define FREE_BLOCK_MASK 0x1
#define FREE_BLOCK_BIT 0
#define NON_FREE_BLOCK_MASK 1
//used as read/write flag
#define G_READ 0
#define G_WRITE 1
//gsys flags
#define RM 0
#define LS_D 1
#define LS_S 2
typedef unsigned char uchar;
typedef uint32_t u32;
//storing system information
typedef struct {
uchar bitmap[SUPERBLOCK_SIZE]; //the bitmap recording free blocks
u32 file_num ; //number of files in the file system
u32 file_list_time[MAX_FILE_NUM]; //list files in order of decreasing modified time
u32 file_list_size[MAX_FILE_NUM]; //list files in order of decreasing file size
}FileSystem;
//FCB entry
typedef struct {
char name[MAX_FILE_NAME]; //file name
u32 valid_entry ; //indicate whether this entry is valid
u32 op ; //the allowed operations of the file
u32 time ; //the last modified time of a file
u32 block_num ; //the index of its file block
u32 file_size ; //the size of a file
}FCB;
//FCB array pointer
__device__ FCB *fcb_table;
//system struct pointer
__device__ FileSystem *file_system;
//total storage
__device__ uchar volume_d[STORAGE_SIZE];
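// Layout of volume_d: the FileSystem struct and the FCB table occupy the first
// (STORAGE_SIZE - MAX_FILE_SIZE) bytes; the file data area starts at
// FILE_STORAGE_START, one 1024-byte block per file indexed by FCB.block_num.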
/* Get the values for bitmap*/
__device__ u32 get_bitmap(u32 index)
{
u32 start_pos = index / BITS_IN_BYTE;
u32 offset = index%BITS_IN_BYTE;
return ((file_system->bitmap[start_pos]) >> offset)&FREE_BLOCK_MASK;
}
/* Set the values for bitmap*/
__device__ void set_bitmap(u32 index, u32 flag)
{
u32 start_pos = index / BITS_IN_BYTE;
u32 offset = index%BITS_IN_BYTE;
if (flag == VALID) file_system->bitmap[start_pos] = file_system->bitmap[start_pos] | (VALID << offset);
else file_system->bitmap[start_pos] = file_system->bitmap[start_pos] & (~(VALID << offset));
}
/* Compare if two file names are the same */
__device__ bool compare_name(const char *dest, const char *src)
{
int index = 0;
while (index<MAX_FILE_NAME) {
if (src[index] != dest[index]) return false;
else if (src[index] == '\0' && dest[index] == '\0') return true;
index++;
}
return true;
}
/* Copy file names*/
__device__ void cpy_filename(char *dest, const char *src)
{
u32 index = 0;
while (src[index] != '\0') {
if (index<MAX_FILE_NAME) dest[index] = src[index];
else {
printf("The file name exceeds the maximum file name length\n");
break;
}
index++;
}
dest[index] = '\0';
}
/* Open Function Implementation*/
__device__ u32 open(const char *s, int op)
{
/* Implement open operation here */
//the index of the FCB entry of a file
u32 file_fcb = -1;
//the index of a free FCB entry
u32 free_fcb = -1;
	//Check whether the file already exists in the FCB table
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (compare_name(fcb_table[i].name, s) ) {
//If found
if (fcb_table[i].valid_entry == VALID) {
file_fcb = i;
fcb_table[i].op = op;
return file_fcb;
}
}
if (fcb_table[i].valid_entry == FREE) free_fcb = i;
}
//if not found
if (file_fcb ==-1) {
//the index of a free block
u32 free_block = -1;
//search the bitmap for free block
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (get_bitmap(i) == FREE) {
free_block = i;
break;
}
}
//if there is a free block, create a new FCB and record its block number
if (free_block != -1) {
//renew modified time of other files
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID) fcb_table[i].time++;
}
//set the modified time of the new file
fcb_table[free_fcb].time = 0;
fcb_table[free_fcb].op = op;
//set the FCB entry valid
fcb_table[free_fcb].valid_entry = TRUE;
//make FCB point to the free block
fcb_table[free_fcb].block_num = free_block;
//set the file name
cpy_filename(fcb_table[free_fcb].name, s);
//set bitmap to indicate it is occupied
set_bitmap(free_block, VALID);
//renew total number of files
file_system->file_num++;
//renew file lists
file_system->file_list_time[file_system->file_num - 1] = free_fcb;
file_system->file_list_size[file_system->file_num - 1] = free_fcb;
return free_fcb;
}
//if no free blocks are available, return error;
else {
printf("no free block\n");
return OP_ERROR;
}
}
}
/* Remove Function Implementation */
__device__ void rm(const char *fileName)
{
//the FCB entry of the to-be-removed file
u32 file_fcb = -1;
//search for FCB by file name
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (compare_name(fcb_table[i].name, fileName)) {
file_fcb = i;
break;
}
}
//if found
if (file_fcb != -1) {
//the real position of the file
u32 file_start = fcb_table[file_fcb].block_num*FCB_ENTRIES;
//the modified time of the file
u32 time = fcb_table[file_fcb].time;
u32 flag = FALSE;
//remove file in file list
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_time[i] == file_fcb) flag = TRUE;
if (flag == TRUE && i != file_system->file_num - 1)
file_system->file_list_time[i] = file_system->file_list_time[i + 1];
else if (flag == TRUE && i == file_system->file_num - 1)
file_system->file_list_time[i] = 0;
}
flag = FALSE;
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_size[i] == file_fcb) flag = TRUE;
if (flag == TRUE && i != file_system->file_num - 1)
file_system->file_list_size[i] = file_system->file_list_size[i + 1];
else if (flag == TRUE && i == file_system->file_num - 1)
file_system->file_list_size[i] = 0;
}
//reset system info
file_system->file_num--;
set_bitmap(fcb_table[file_fcb].block_num, FREE);
//reset modified time of other files
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID) {
if (fcb_table[i].time>time)
fcb_table[i].time--;
}
}
//clear file content
for (int i = 0; i<MAX_ONE_FILE_SIZE; i++)
volume_d[FILE_STORAGE_START + file_start + i] = 0;
//reset FCB block
fcb_table[file_fcb].valid_entry = FREE;
fcb_table[file_fcb].op = G_READ;
fcb_table[file_fcb].time = 0;
fcb_table[file_fcb].block_num = 0;
fcb_table[file_fcb].file_size = 0;
for (int j = 0; j<MAX_FILE_NAME; j++)
fcb_table[file_fcb].name[j] = 0;
}
//if not found
else printf("Cannot find file %s\n", fileName);
}
/* Write Function Implementation */
__device__ u32 write(const uchar *input, u32 size, u32 fp)
{
//if file is not in write op, return error
if (fcb_table[fp].op != G_WRITE) {
printf("%s is not in write op\n", fcb_table[fp].name);
return OP_ERROR;
}
u32 file_start = fcb_table[fp].block_num*FCB_ENTRIES;
u32 previous_time = fcb_table[fp].time;
u32 count;
if (size < MAX_ONE_FILE_SIZE) count = size;
else count = MAX_ONE_FILE_SIZE;
	//if the requested write exceeds the maximum file size
if (size>MAX_ONE_FILE_SIZE)
printf("Cannot write more than 1024 bytes in a file\n");
//write the file
for (int i = 0; i<count; i++)
volume_d[FILE_STORAGE_START + file_start + i] = input[i];
//renew the file size in FCB
fcb_table[fp].file_size = count;
//renew modified time in FCB
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID && fcb_table[i].time <= previous_time) {
fcb_table[i].time++;
}
}
fcb_table[fp].time = 0;
//renew file lists
for (int i = 0; i<file_system->file_num - 1; i++) {
if (fcb_table[file_system->file_list_time[i]].time <= previous_time)
file_system->file_list_time[i] = file_system->file_list_time[i + 1];
}
file_system->file_list_time[file_system->file_num - 1] = fp;
u32 start_idx = -1;
u32 end_idx = file_system->file_num;
u32 flag = FALSE;
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_size[i] == fp) start_idx = i;
if (fcb_table[file_system->file_list_size[i]].file_size <= count && flag == FALSE){
flag = TRUE;
end_idx = i;
}
}
//if we don't find the final position, set it the tail of list
if (end_idx>start_idx) {
u32 temp = file_system->file_list_size[start_idx];
for (int i = start_idx; i<end_idx - 1; i++)
file_system->file_list_size[i] = file_system->file_list_size[i + 1];
file_system->file_list_size[end_idx - 1] = temp;
}
else if (end_idx<start_idx) {
u32 temp = file_system->file_list_size[start_idx];
for (int i = start_idx; i>end_idx; i--)
file_system->file_list_size[i] = file_system->file_list_size[i - 1];
file_system->file_list_size[end_idx] = temp;
}
//return number of bytes written
return count;
}
/* Read Function Implementation */
__device__ u32 read(uchar *output, u32 size, u32 fp)
{
//if file is not in read op, return error
if (fcb_table[fp].op != G_READ) {
printf("%s is not in read op\n", fcb_table[fp].name);
return OP_ERROR;
}
u32 file_start = fcb_table[fp].block_num*FCB_ENTRIES;
u32 count;
if (size < fcb_table[fp].file_size) count = size;
else count = fcb_table[fp].file_size;
	//if the requested read exceeds the file size
if (size>fcb_table[fp].file_size)
printf("Cannot read more than file size\n");
//read the file
for (int i = 0; i<count; i++)
output[i] = volume_d[FILE_STORAGE_START + file_start + i];
//return number of bytes read
return count;
}
/* LS_D and LS_S Implementation */
__device__ void gsys(int op)
{
char *name;
u32 size=0;
/* Implement LS_D and LS_S operation here */
//LS_D Operation
if (op==LS_D){
printf("===sort by modified time===\n");
for(int i=file_system->file_num-1;i>=0;i--){
name = fcb_table[file_system->file_list_time[i]].name;
printf("%s\n",name);
}
}
//LS_S Operation
else if(op==LS_S){
printf("===sort by file size===\n");
for(int i=0;i<file_system->file_num;i++){
name=fcb_table[file_system->file_list_size[i]].name;
size=fcb_table[file_system->file_list_size[i]].file_size;
printf("%s %d\n",name,size);
}
}
else printf("The command is invalid\n");
}
/* RM Implementation */
__device__ void gsys(int op, char *s)
{
/* Implement rm operation here */
if (op==RM){
rm(s);
}
else printf("The command is invalid\n");
}
__host__ void write_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "wb");
fwrite(buffer, 1, bufferSize, fp);
fclose(fp);
}
__host__ int load_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "rb");
if (!fp)
{
printf("***Unable to open file %s***\n", fileName);
exit(1);
}
//Get file length
fseek(fp, 0, SEEK_END);
int fileLen = ftell(fp);
fseek(fp, 0, SEEK_SET);
if (fileLen > bufferSize)
{
printf("****invalid testcase!!****\n");
printf("****software warrning: the file: %s size****\n", fileName);
printf("****is greater than buffer size****\n");
exit(1);
}
//Read file contents into buffer
fread(buffer, fileLen, 1, fp);
fclose(fp);
return fileLen;
}
__device__ void init_volume()
{
file_system = (FileSystem *)volume_d;
fcb_table = (FCB *)(volume_d + sizeof(*file_system));
for (int i = 0; i<MAX_FILE_NUM / BITS_IN_BYTE; i++)
file_system->bitmap[i] = 0;
file_system->file_num = 0;
for (int i = 0; i<MAX_FILE_NUM; i++) {
file_system->file_list_time[i] = 0;
file_system->file_list_size[i] = 0;
}
for (int i = 0; i<MAX_FILE_NUM; i++) {
fcb_table[i].valid_entry = FREE;
fcb_table[i].op = G_READ;
fcb_table[i].time = 0;
fcb_table[i].block_num = 0;
fcb_table[i].file_size = 0;
for (int j = 0; j<MAX_FILE_NAME; j++) {
fcb_table[i].name[j] = 0;
}
}
}
__global__ void mykernel(uchar *input, uchar *output)
{
init_volume();
/**************************************
* Test Case 1
***************************************/
// kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input + 32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input + 32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input + 64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
// kernel test end
/*/
/**************************************
* Test Case 2
**************************************
//kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input+64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
char fname[10][20];
for(int i = 0; i < 10; i++)
{
fname[i][0] = i+33;
for(int j = 1; j < 19; j++)
fname[i][j] = 64+j;
fname[i][19] = '\0';
}
for(int i = 0; i < 10; i++)
{
fp = open(fname[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
for(int i = 0; i < 5; i++)
gsys(RM, fname[i]);
gsys(LS_D);
// kernel test end
*/
/**************************************
* Test Case 3
**************************************
//kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input+64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
char fname[10][20];
for(int i = 0; i < 10; i++)
{
fname[i][0] = i+33;
for(int j = 1; j < 19; j++)
fname[i][j] = 64+j;
fname[i][19] = '\0';
}
for(int i = 0; i < 10; i++)
{
fp = open(fname[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
for(int i = 0; i < 5; i++)
gsys(RM, fname[i]);
gsys(LS_D);
char fname2[1018][20];
int p = 0;
for(int k = 2; k < 15; k++)
for(int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for(int j = 1; j < k; j++)
fname2[p][j] = 64+j;
fname2[p][k] = '\0';
}
for(int i = 0 ; i < 1001; i++)
{
fp = open(fname2[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
fp = open(fname2[1000], G_READ);
read(output+1000, 1024, fp);
char fname3[17][3];
for(int i = 0; i < 17; i++)
{
fname3[i][0] = 97+i;
fname3[i][1] = 97+i;
fname3[i][2] = '\0';
fp = open(fname3[i], G_WRITE);
write(input+1024*i, 1024, fp);
}
fp = open("EA\0", G_WRITE);
write(input+1024*100, 1024, fp);
gsys(LS_S);
//kernel test end
*/
}
/************************************************************************************
*
* Main function
*
************************************************************************************/
int main()
{
uchar *input_h;
uchar *input;
uchar *output_h;
uchar *output;
input_h = (uchar *)malloc(sizeof(uchar)* MAX_FILE_SIZE);
output_h = (uchar *)malloc(sizeof(uchar)* MAX_FILE_SIZE);
hipMalloc(&input, sizeof(uchar)* MAX_FILE_SIZE);
hipMalloc(&output, sizeof(uchar)* MAX_FILE_SIZE);
// load binary file from data.bin
load_binaryFile(DATAFILE, input_h, MAX_FILE_SIZE);
hipMemcpy(input, input_h, sizeof(uchar)* MAX_FILE_SIZE, hipMemcpyHostToDevice);
hipMemcpy(output, output_h, sizeof(uchar)* MAX_FILE_SIZE, hipMemcpyHostToDevice);
mykernel << <1, 1 >> >(input, output);
hipMemcpy(output_h, output, sizeof(uchar)* MAX_FILE_SIZE, hipMemcpyDeviceToHost);
// dump output array to snapshot.bin
write_binaryFile(OUTFILE, output_h, MAX_FILE_SIZE);
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
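// Build/run sketch (illustrative only; the source file name and hipcc invocation are assumptions):
//   hipcc fs.hip -o fs && ./fs
// data.bin must be present in the working directory; the run writes snapshot.bin.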
| e9e22b7bd3d9118a50eead43e98f29cf3fd99a75.cu | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <string.h>
#define DATAFILE "./data.bin"
#define OUTFILE "./snapshot.bin"
//Volume Control Block, which contains volume details
//Including #of blocks, #of free blocks, block size, free block pointers or array
#define SUPERBLOCK_SIZE 4096 //4KB
//File Control Block, which is a Storage Structure consisting of information about a file
#define FCB_SIZE 32 //32 bytes per FCB
#define FCB_ENTRIES 1024
//Total size of available memory
#define STORAGE_SIZE 1085440 //1060KB
#define STORAGE_BLOCK_SIZE 32
#define MAX_FILENAME_SIZE 20 //20 bytes
//max number of files
#define MAX_FILE_NUM 1024
//max size of file
#define MAX_ONE_FILE_SIZE 1024
//max size of file name
#define MAX_FILE_NAME 20
//The maximum size of file memory
#define MAX_FILE_SIZE 1048576 //1024KB
#define BIT_TO_BYTE 8 //Used in the conversion of bit to byte
//the start position of file memory
#define FILE_STORAGE_START (STORAGE_SIZE-MAX_FILE_SIZE)
#define BITS_IN_BYTE 8
#define OP_ERROR -1
#define TRUE 1
#define FALSE 0
//for valid/invalid FCB and valid/free block
#define VALID 1
#define FREE 0
#define INVALID 0
//for bitmap
#define FREE_BLOCK_MASK 0x1
#define FREE_BLOCK_BIT 0
#define NON_FREE_BLOCK_MASK 1
//used as read/write flag
#define G_READ 0
#define G_WRITE 1
//gsys flags
#define RM 0
#define LS_D 1
#define LS_S 2
typedef unsigned char uchar;
typedef uint32_t u32;
//storing system information
typedef struct {
uchar bitmap[SUPERBLOCK_SIZE]; //the bitmap recording free blocks
u32 file_num ; //number of files in the file system
u32 file_list_time[MAX_FILE_NUM]; //list files in order of decreasing modified time
u32 file_list_size[MAX_FILE_NUM]; //list files in order of decreasing file size
}FileSystem;
//FCB entry
typedef struct {
char name[MAX_FILE_NAME]; //file name
u32 valid_entry ; //indicate whether this entry is valid
u32 op ; //the allowed operations of the file
u32 time ; //the last modified time of a file
u32 block_num ; //the index of its file block
u32 file_size ; //the size of a file
}FCB;
//FCB array pointer
__device__ FCB *fcb_table;
//system struct pointer
__device__ FileSystem *file_system;
//total storage
__device__ uchar volume_d[STORAGE_SIZE];
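/*
 * For reference, the size constants above are mutually consistent: the volume is
 * laid out as super-block + FCB table + file data, and
 *   SUPERBLOCK_SIZE + FCB_ENTRIES*FCB_SIZE + MAX_FILE_SIZE
 *   = 4096 + 1024*32 + 1048576 = 1085440 = STORAGE_SIZE,
 * so the file-data region starts at FILE_STORAGE_START = STORAGE_SIZE - MAX_FILE_SIZE
 * = 36864. Note that sizeof(FileSystem) is larger than SUPERBLOCK_SIZE (the bitmap
 * alone is 4096 bytes, plus the two 1024-entry lists), which is why init_volume()
 * places fcb_table at volume_d + sizeof(*file_system) rather than at a fixed
 * 4096-byte offset.
 */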
/* Get the values for bitmap*/
__device__ u32 get_bitmap(u32 index)
{
u32 start_pos = index / BITS_IN_BYTE;
u32 offset = index%BITS_IN_BYTE;
return ((file_system->bitmap[start_pos]) >> offset)&FREE_BLOCK_MASK;
}
/* Set the values for bitmap*/
__device__ void set_bitmap(u32 index, u32 flag)
{
u32 start_pos = index / BITS_IN_BYTE;
u32 offset = index%BITS_IN_BYTE;
if (flag == VALID) file_system->bitmap[start_pos] = file_system->bitmap[start_pos] | (VALID << offset);
else file_system->bitmap[start_pos] = file_system->bitmap[start_pos] & (~(VALID << offset));
}
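/*
 * Worked example of the bit packing above: block index 10 lives in byte 10/8 = 1
 * at bit 10%8 = 2, so set_bitmap(10, VALID) ORs 0x04 into bitmap[1] and
 * get_bitmap(10) evaluates (bitmap[1] >> 2) & 0x1. With MAX_FILE_NUM = 1024
 * blocks, only the first 1024/8 = 128 bytes of the 4096-byte bitmap are ever
 * used, matching the clearing loop in init_volume().
 */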
/* Compare if two file names are the same */
__device__ bool compare_name(const char *dest, const char *src)
{
int index = 0;
while (index<MAX_FILE_NAME) {
if (src[index] != dest[index]) return false;
else if (src[index] == '\0' && dest[index] == '\0') return true;
index++;
}
return true;
}
/* Copy file names*/
__device__ void cpy_filename(char *dest, const char *src)
{
u32 index = 0;
while (src[index] != '\0') {
if (index<MAX_FILE_NAME) dest[index] = src[index];
else {
printf("The file name exceeds the maximum file name length\n");
break;
}
index++;
}
dest[index] = '\0';
}
/* Open Function Implementation*/
__device__ u32 open(const char *s, int op)
{
/* Implement open operation here */
//the index of the FCB entry of a file
u32 file_fcb = -1;
//the index of a free FCB entry
u32 free_fcb = -1;
//Check whether the file already exists in the FCB table
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (compare_name(fcb_table[i].name, s) ) {
//If found
if (fcb_table[i].valid_entry == VALID) {
file_fcb = i;
fcb_table[i].op = op;
return file_fcb;
}
}
if (fcb_table[i].valid_entry == FREE) free_fcb = i;
}
//if not found
if (file_fcb ==-1) {
//the index of a free block
u32 free_block = -1;
//search the bitmap for free block
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (get_bitmap(i) == FREE) {
free_block = i;
break;
}
}
//if there is a free block, create a new FCB and record its block number
if (free_block != -1) {
//renew modified time of other files
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID) fcb_table[i].time++;
}
//set the modified time of the new file
fcb_table[free_fcb].time = 0;
fcb_table[free_fcb].op = op;
//set the FCB entry valid
fcb_table[free_fcb].valid_entry = TRUE;
//make FCB point to the free block
fcb_table[free_fcb].block_num = free_block;
//set the file name
cpy_filename(fcb_table[free_fcb].name, s);
//set bitmap to indicate it is occupied
set_bitmap(free_block, VALID);
//renew total number of files
file_system->file_num++;
//renew file lists
file_system->file_list_time[file_system->file_num - 1] = free_fcb;
file_system->file_list_size[file_system->file_num - 1] = free_fcb;
return free_fcb;
}
//if no free blocks are available, return error;
else {
printf("no free block\n");
return OP_ERROR;
}
}
}
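/*
 * Note on the modified-time convention used by open() and write(): a smaller
 * FCB time means "more recently modified". Every existing valid file has its
 * counter incremented when another file is created or written, and the touched
 * file is reset to 0. For example, after creating a.txt, b.txt and c.txt in
 * that order their times are 2, 1 and 0, so gsys(LS_D), which walks
 * file_list_time from the back, prints c.txt first.
 */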
/* Remove Function Implementation */
__device__ void rm(const char *fileName)
{
//the FCB entry of the to-be-removed file
u32 file_fcb = -1;
//search for FCB by file name
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (compare_name(fcb_table[i].name, fileName)) {
file_fcb = i;
break;
}
}
//if found
if (file_fcb != -1) {
//the real position of the file
u32 file_start = fcb_table[file_fcb].block_num*FCB_ENTRIES;
//the modified time of the file
u32 time = fcb_table[file_fcb].time;
u32 flag = FALSE;
//remove file in file list
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_time[i] == file_fcb) flag = TRUE;
if (flag == TRUE && i != file_system->file_num - 1)
file_system->file_list_time[i] = file_system->file_list_time[i + 1];
else if (flag == TRUE && i == file_system->file_num - 1)
file_system->file_list_time[i] = 0;
}
flag = FALSE;
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_size[i] == file_fcb) flag = TRUE;
if (flag == TRUE && i != file_system->file_num - 1)
file_system->file_list_size[i] = file_system->file_list_size[i + 1];
else if (flag == TRUE && i == file_system->file_num - 1)
file_system->file_list_size[i] = 0;
}
//reset system info
file_system->file_num--;
set_bitmap(fcb_table[file_fcb].block_num, FREE);
//reset modified time of other files
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID) {
if (fcb_table[i].time>time)
fcb_table[i].time--;
}
}
//clear file content
for (int i = 0; i<MAX_ONE_FILE_SIZE; i++)
volume_d[FILE_STORAGE_START + file_start + i] = 0;
//reset FCB block
fcb_table[file_fcb].valid_entry = FREE;
fcb_table[file_fcb].op = G_READ;
fcb_table[file_fcb].time = 0;
fcb_table[file_fcb].block_num = 0;
fcb_table[file_fcb].file_size = 0;
for (int j = 0; j<MAX_FILE_NAME; j++)
fcb_table[file_fcb].name[j] = 0;
}
//if not found
else printf("Cannot find file %s\n", fileName);
}
/* Write Function Implementation */
__device__ u32 write(const uchar *input, u32 size, u32 fp)
{
//if file is not in write op, return error
if (fcb_table[fp].op != G_WRITE) {
printf("%s is not in write op\n", fcb_table[fp].name);
return OP_ERROR;
}
u32 file_start = fcb_table[fp].block_num*FCB_ENTRIES;
u32 previous_time = fcb_table[fp].time;
u32 count;
if (size < MAX_ONE_FILE_SIZE) count = size;
else count = MAX_ONE_FILE_SIZE;
//if bytes to write is more than max file size
if (size>MAX_ONE_FILE_SIZE)
printf("Cannot write more than 1024 bytes in a file\n");
//write the file
for (int i = 0; i<count; i++)
volume_d[FILE_STORAGE_START + file_start + i] = input[i];
//renew the file size in FCB
fcb_table[fp].file_size = count;
//renew modified time in FCB
for (int i = 0; i<MAX_FILE_NUM; i++) {
if (fcb_table[i].valid_entry == VALID && fcb_table[i].time <= previous_time) {
fcb_table[i].time++;
}
}
fcb_table[fp].time = 0;
//renew file lists
for (int i = 0; i<file_system->file_num - 1; i++) {
if (fcb_table[file_system->file_list_time[i]].time <= previous_time)
file_system->file_list_time[i] = file_system->file_list_time[i + 1];
}
file_system->file_list_time[file_system->file_num - 1] = fp;
u32 start_idx = -1;
u32 end_idx = file_system->file_num;
u32 flag = FALSE;
for (int i = 0; i<file_system->file_num; i++) {
if (file_system->file_list_size[i] == fp) start_idx = i;
if (fcb_table[file_system->file_list_size[i]].file_size <= count && flag == FALSE){
flag = TRUE;
end_idx = i;
}
}
//if we don't find the final position, set it the tail of list
if (end_idx>start_idx) {
u32 temp = file_system->file_list_size[start_idx];
for (int i = start_idx; i<end_idx - 1; i++)
file_system->file_list_size[i] = file_system->file_list_size[i + 1];
file_system->file_list_size[end_idx - 1] = temp;
}
else if (end_idx<start_idx) {
u32 temp = file_system->file_list_size[start_idx];
for (int i = start_idx; i>end_idx; i--)
file_system->file_list_size[i] = file_system->file_list_size[i - 1];
file_system->file_list_size[end_idx] = temp;
}
//return number of bytes written
return count;
}
/* Read Function Implementation */
__device__ u32 read(uchar *output, u32 size, u32 fp)
{
//if file is not in read op, return error
if (fcb_table[fp].op != G_READ) {
printf("%s is not in read op\n", fcb_table[fp].name);
return OP_ERROR;
}
u32 file_start = fcb_table[fp].block_num*FCB_ENTRIES;
u32 count;
if (size < fcb_table[fp].file_size) count = size;
else count = fcb_table[fp].file_size;
//if bytes to read is more than max file size
if (size>fcb_table[fp].file_size)
printf("Cannot read more than file size\n");
//read the file
for (int i = 0; i<count; i++)
output[i] = volume_d[FILE_STORAGE_START + file_start + i];
//return number of bytes read
return count;
}
/* LS_D and LS_S Implementation */
__device__ void gsys(int op)
{
char *name;
u32 size=0;
/* Implement LS_D and LS_S operation here */
//LS_D Operation
if (op==LS_D){
printf("===sort by modified time===\n");
for(int i=file_system->file_num-1;i>=0;i--){
name = fcb_table[file_system->file_list_time[i]].name;
printf("%s\n",name);
}
}
//LS_S Operation
else if(op==LS_S){
printf("===sort by file size===\n");
for(int i=0;i<file_system->file_num;i++){
name=fcb_table[file_system->file_list_size[i]].name;
size=fcb_table[file_system->file_list_size[i]].file_size;
printf("%s %d\n",name,size);
}
}
else printf("The command is invalid\n");
}
/* RM Implementation */
__device__ void gsys(int op, char *s)
{
/* Implement rm operation here */
if (op==RM){
rm(s);
}
else printf("The command is invalid\n");
}
__host__ void write_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "wb");
fwrite(buffer, 1, bufferSize, fp);
fclose(fp);
}
__host__ int load_binaryFile(char *fileName, void *buffer, int bufferSize)
{
FILE *fp;
fp = fopen(fileName, "rb");
if (!fp)
{
printf("***Unable to open file %s***\n", fileName);
exit(1);
}
//Get file length
fseek(fp, 0, SEEK_END);
int fileLen = ftell(fp);
fseek(fp, 0, SEEK_SET);
if (fileLen > bufferSize)
{
printf("****invalid testcase!!****\n");
printf("****software warrning: the file: %s size****\n", fileName);
printf("****is greater than buffer size****\n");
exit(1);
}
//Read file contents into buffer
fread(buffer, fileLen, 1, fp);
fclose(fp);
return fileLen;
}
__device__ void init_volume()
{
file_system = (FileSystem *)volume_d;
fcb_table = (FCB *)(volume_d + sizeof(*file_system));
for (int i = 0; i<MAX_FILE_NUM / BITS_IN_BYTE; i++)
file_system->bitmap[i] = 0;
file_system->file_num = 0;
for (int i = 0; i<MAX_FILE_NUM; i++) {
file_system->file_list_time[i] = 0;
file_system->file_list_size[i] = 0;
}
for (int i = 0; i<MAX_FILE_NUM; i++) {
fcb_table[i].valid_entry = FREE;
fcb_table[i].op = G_READ;
fcb_table[i].time = 0;
fcb_table[i].block_num = 0;
fcb_table[i].file_size = 0;
for (int j = 0; j<MAX_FILE_NAME; j++) {
fcb_table[i].name[j] = 0;
}
}
}
__global__ void mykernel(uchar *input, uchar *output)
{
init_volume();
/**************************************
* Test Case 1
***************************************/
// kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input + 32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input + 32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input + 64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
// kernel test end
/*/
/**************************************
* Test Case 2
**************************************
//kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input+64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
char fname[10][20];
for(int i = 0; i < 10; i++)
{
fname[i][0] = i+33;
for(int j = 1; j < 19; j++)
fname[i][j] = 64+j;
fname[i][19] = '\0';
}
for(int i = 0; i < 10; i++)
{
fp = open(fname[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
for(int i = 0; i < 5; i++)
gsys(RM, fname[i]);
gsys(LS_D);
// kernel test end
*/
/**************************************
* Test Case 3
**************************************
//kernel test start
u32 fp = open("t.txt\0", G_WRITE);
write(input, 64, fp);
fp = open("b.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_WRITE);
write(input+32, 32, fp);
fp = open("t.txt\0", G_READ);
read(output, 32, fp);
gsys(LS_D);
gsys(LS_S);
fp = open("b.txt\0", G_WRITE);
write(input+64, 12, fp);
gsys(LS_S);
gsys(LS_D);
gsys(RM, "t.txt\0");
gsys(LS_S);
char fname[10][20];
for(int i = 0; i < 10; i++)
{
fname[i][0] = i+33;
for(int j = 1; j < 19; j++)
fname[i][j] = 64+j;
fname[i][19] = '\0';
}
for(int i = 0; i < 10; i++)
{
fp = open(fname[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
for(int i = 0; i < 5; i++)
gsys(RM, fname[i]);
gsys(LS_D);
char fname2[1018][20];
int p = 0;
for(int k = 2; k < 15; k++)
for(int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for(int j = 1; j < k; j++)
fname2[p][j] = 64+j;
fname2[p][k] = '\0';
}
for(int i = 0 ; i < 1001; i++)
{
fp = open(fname2[i], G_WRITE);
write(input+i, 24+i, fp);
}
gsys(LS_S);
fp = open(fname2[1000], G_READ);
read(output+1000, 1024, fp);
char fname3[17][3];
for(int i = 0; i < 17; i++)
{
fname3[i][0] = 97+i;
fname3[i][1] = 97+i;
fname3[i][2] = '\0';
fp = open(fname3[i], G_WRITE);
write(input+1024*i, 1024, fp);
}
fp = open("EA\0", G_WRITE);
write(input+1024*100, 1024, fp);
gsys(LS_S);
//kernel test end
*/
}
/************************************************************************************
*
* Main function
*
************************************************************************************/
int main()
{
uchar *input_h;
uchar *input;
uchar *output_h;
uchar *output;
input_h = (uchar *)malloc(sizeof(uchar)* MAX_FILE_SIZE);
output_h = (uchar *)malloc(sizeof(uchar)* MAX_FILE_SIZE);
cudaMalloc(&input, sizeof(uchar)* MAX_FILE_SIZE);
cudaMalloc(&output, sizeof(uchar)* MAX_FILE_SIZE);
// load binary file from data.bin
load_binaryFile(DATAFILE, input_h, MAX_FILE_SIZE);
cudaMemcpy(input, input_h, sizeof(uchar)* MAX_FILE_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(output, output_h, sizeof(uchar)* MAX_FILE_SIZE, cudaMemcpyHostToDevice);
mykernel << <1, 1 >> >(input, output);
cudaMemcpy(output_h, output, sizeof(uchar)* MAX_FILE_SIZE, cudaMemcpyDeviceToHost);
// dump output array to snapshot.bin
write_binaryFile(OUTFILE, output_h, MAX_FILE_SIZE);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
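// Usage note: mykernel is launched with a single block and a single thread
// (<<<1, 1>>>), so the whole file-system exercise runs serially inside one GPU
// thread; the host side only stages data.bin into device memory beforehand and
// dumps the result to snapshot.bin afterwards.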
|
7ff7bb26cded3012e88ed9693ebc3d19d91daf88.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xflow/ops.h"
#include "xflow/cuda_helper.h"
using namespace XFlow;
void Activation::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
helperSetTensorDescriptor(inputs[0], inputTensor);
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
cudnnActivationMode_t mode;
switch (type) {
case OP_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case OP_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case OP_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
if (!inPlace) {
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < inputs[0].numDim; i++)
outputSize *= inputs[0].dim[i];
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize));
} else {
outputs[0].data_ptr = inputs[0].data_ptr;
}
}
void Activation::unmap(void)
{
checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
if (!inPlace) {
checkCUDA(hipFree(outputs[0].data_ptr));
}
}
void Activation::forward(bool block)
{
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnActivationForward(model->dnn, actiDesc,
&alpha, inputTensor, inputs[0].data_ptr,
&beta, inputTensor, outputs[0].data_ptr));
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_activation_cost(Activation* act)
{
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetTensorDescriptor(act->inputs[0], inputTensor);
cudnnActivationMode_t mode;
switch (act->type) {
case OP_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case OP_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case OP_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
if (act->inPlace) {
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, inputTensor, inputPtr,
&beta, inputTensor, inputPtr));
} else {
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, inputTensor, inputPtr,
&beta, inputTensor, outputPtr));
}
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
act->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Activation]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
act->inputs[0].dim[0], act->inputs[0].dim[1], act->inputs[0].dim[2],
act->inputs[0].dim[3], act->type, act->runtime);
}
| 7ff7bb26cded3012e88ed9693ebc3d19d91daf88.cu | /* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xflow/ops.h"
#include "xflow/cuda_helper.h"
using namespace XFlow;
void Activation::map(void)
{
// create descriptors
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
helperSetTensorDescriptor(inputs[0], inputTensor);
checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc));
cudnnActivationMode_t mode;
switch (type) {
case OP_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case OP_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case OP_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
if (!inPlace) {
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < inputs[0].numDim; i++)
outputSize *= inputs[0].dim[i];
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize));
} else {
outputs[0].data_ptr = inputs[0].data_ptr;
}
}
void Activation::unmap(void)
{
checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor));
checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc));
if (!inPlace) {
checkCUDA(cudaFree(outputs[0].data_ptr));
}
}
void Activation::forward(bool block)
{
const float alpha = 1.0f;
const float beta = 0.0f;
checkCUDNN(cudnnActivationForward(model->dnn, actiDesc,
&alpha, inputTensor, inputs[0].data_ptr,
&beta, inputTensor, outputs[0].data_ptr));
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_activation_cost(Activation* act)
{
const float alpha = 1.0f;
const float beta = 0.0f;
helperSetTensorDescriptor(act->inputs[0], inputTensor);
cudnnActivationMode_t mode;
switch (act->type) {
case OP_RELU:
mode = CUDNN_ACTIVATION_RELU;
break;
case OP_SIGMOID:
mode = CUDNN_ACTIVATION_SIGMOID;
break;
case OP_TANH:
mode = CUDNN_ACTIVATION_TANH;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode,
CUDNN_NOT_PROPAGATE_NAN, 0.0));
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
if (act->inPlace) {
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, inputTensor, inputPtr,
&beta, inputTensor, inputPtr));
} else {
checkCUDNN(cudnnActivationForward(dnn, actiDesc,
&alpha, inputTensor, inputPtr,
&beta, inputTensor, outputPtr));
}
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
act->runtime = milliseconds / REPEAT_TIMES;
if (print_cost)
printf(" measure[Activation]: i(%d %d %d %d) type(%d) cost(%.4lf)\n",
act->inputs[0].dim[0], act->inputs[0].dim[1], act->inputs[0].dim[2],
act->inputs[0].dim[3], act->type, act->runtime);
}
|
e598e58f89521f9b81c9d143a15cd1f2e1ba6d36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define num_thread 256
#define num_block 256
__global__ void blending(float *a,float *b,float *c,float *d,int width,int height,int w,float A,float error_lm,float error_mm,int class_num)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
const int Idx=num_thread*bid+tid;
float r_LM,r_MM, r_center_LM,r_center_MM;
int row,column;
int i,j;
float sum1,sum2;
float st=0.0;
int judge;
float dis;
float wei;
float weih,result;
int kk=0;
int rmin,rmax,smin,smax;
for(int kkk=Idx;kkk<width*height;kkk=kkk+num_thread*num_block)
{
result=0;
judge=0;
wei=0;
kk=0;
sum1=0,sum2=0;
row=kkk/width;
column=kkk%width;
//if(row==1)
// wei=0;
r_center_LM =d[kkk]-b[kkk]+error_lm;
r_center_MM=d[kkk]-c[kkk]+1.412*error_mm;
if(column-w/2<=0)
rmin=0;
else
rmin = column-w/2;
if(column+w/2>=width-1)
rmax = width-1;
else
rmax = column+w/2;
if(row-w/2<=0)
smin=0;
else
smin = row-w/2;
if(row+w/2>=height-1)
smax = height-1;
else
smax = row+w/2;
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
sum1+=b[i*width+j]*b[i*width+j];
sum2+=b[i*width+j];
}
}
//if(column==30&&row==30)
// result=0;
st=sqrt(sum1/(w*w)-(sum2/(w*w))*(sum2/(w*w)))/ class_num;
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
if(fabs(b[kkk]-b[i*width+j])<st)
{
r_LM=d[i*width+j]-b[i*width+j];
r_MM=d[i*width+j]-c[i*width+j];
if((r_center_LM>0&&r_LM<r_center_LM)||(r_center_LM<0&&r_LM>r_center_LM))
{
if((r_center_MM>0&&r_MM<r_center_MM)||(r_center_MM<0&&r_MM>r_center_MM))
{
r_LM=fabs(r_LM)+0.0001;
r_MM=fabs(r_MM)+0.0001;
if(kkk==i*width+j)
judge=1;
dis=float((row-i)*(row-i)+(column-j)*(column-j));
dis=sqrt(dis)/A+1.0;
weih=1.0/(dis* r_LM*r_MM);
wei+=weih;
result+=weih*(c[i*width+j]+b[i*width+j]-d[i*width+j]);
kk++;
}
}
}
}
}
if(kk==0)
{
a[kkk]=fabs(b[kkk]+c[kkk]-d[kkk]); // use the floating-point fabs, consistent with the rest of this kernel
wei=10000;
}
else
{
if(judge==0)
{
dis=1.0;
r_LM=fabs(d[kkk]-b[kkk])+0.0001;
r_MM=fabs(d[kkk]-c[kkk])+0.0001;
weih=1.0/(dis* r_LM*r_MM);
result+=weih*(b[kkk]+c[kkk]-d[kkk]);
wei+=weih;
}
a[kkk]=result/wei;
//if(a[kkk]<0)
// a[kkk]=(b[kkk]+c[kkk]-d[kkk]);
}
}
} | e598e58f89521f9b81c9d143a15cd1f2e1ba6d36.cu | #include "includes.h"
#define num_thread 256
#define num_block 256
__global__ void blending(float *a,float *b,float *c,float *d,int width,int height,int w,float A,float error_lm,float error_mm,int class_num)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
const int Idx=num_thread*bid+tid;
float r_LM,r_MM, r_center_LM,r_center_MM;
int row,column;
int i,j;
float sum1,sum2;
float st=0.0;
int judge;
float dis;
float wei;
float weih,result;
int kk=0;
int rmin,rmax,smin,smax;
for(int kkk=Idx;kkk<width*height;kkk=kkk+num_thread*num_block)
{
result=0;
judge=0;
wei=0;
kk=0;
sum1=0,sum2=0;
row=kkk/width;
column=kkk%width;
//if(row==1)
// wei=0;
r_center_LM =d[kkk]-b[kkk]+error_lm;
r_center_MM=d[kkk]-c[kkk]+1.412*error_mm;
if(column-w/2<=0)
rmin=0;
else
rmin = column-w/2;
if(column+w/2>=width-1)
rmax = width-1;
else
rmax = column+w/2;
if(row-w/2<=0)
smin=0;
else
smin = row-w/2;
if(row+w/2>=height-1)
smax = height-1;
else
smax = row+w/2;
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
sum1+=b[i*width+j]*b[i*width+j];
sum2+=b[i*width+j];
}
}
//if(column==30&&row==30)
// result=0;
st=sqrt(sum1/(w*w)-(sum2/(w*w))*(sum2/(w*w)))/ class_num;
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
if(fabs(b[kkk]-b[i*width+j])<st)
{
r_LM=d[i*width+j]-b[i*width+j];
r_MM=d[i*width+j]-c[i*width+j];
if((r_center_LM>0&&r_LM<r_center_LM)||(r_center_LM<0&&r_LM>r_center_LM))
{
if((r_center_MM>0&&r_MM<r_center_MM)||(r_center_MM<0&&r_MM>r_center_MM))
{
r_LM=fabs(r_LM)+0.0001;
r_MM=fabs(r_MM)+0.0001;
if(kkk==i*width+j)
judge=1;
dis=float((row-i)*(row-i)+(column-j)*(column-j));
dis=sqrt(dis)/A+1.0;
weih=1.0/(dis* r_LM*r_MM);
wei+=weih;
result+=weih*(c[i*width+j]+b[i*width+j]-d[i*width+j]);
kk++;
}
}
}
}
}
if(kk==0)
{
a[kkk]=fabs(b[kkk]+c[kkk]-d[kkk]); // use the floating-point fabs, consistent with the rest of this kernel
wei=10000;
}
else
{
if(judge==0)
{
dis=1.0;
r_LM=fabs(d[kkk]-b[kkk])+0.0001;
r_MM=fabs(d[kkk]-c[kkk])+0.0001;
weih=1.0/(dis* r_LM*r_MM);
result+=weih*(b[kkk]+c[kkk]-d[kkk]);
wei+=weih;
}
a[kkk]=result/wei;
//if(a[kkk]<0)
// a[kkk]=(b[kkk]+c[kkk]-d[kkk]);
}
}
} |
4825c5aee9e029b22e60777cf2f3d61b383e55b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> s, Thu Oct 8 23:05:37 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. and shuffle for communication.
// It also uses lazy swap.
extern __shared__ float ddata[];
template<int N, int NSHFL>
__global__ void
sgetrf_batched_smallsq_shfl_kernel( float** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
float* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
float rA[N] = {MAGMA_S_ZERO};
float y[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
float update = MAGMA_S_ZERO;
int max_id, current_piv_tx, rowid = tx, linfo = 0;
float rx_abs_max = MAGMA_D_ZERO;
// shared memory pointers
float* sx = (float*)(ddata);
int* sipiv = (int*)(sx + blockDim.y * NSHFL);
sx += ty * NSHFL;
sipiv += ty * (NSHFL+1);
volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL);
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
sx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = sx[i];
max_id = i;
#pragma unroll
for(int j = i; j < N; j++){
if( sx[j] > rx_abs_max){
max_id = j;
rx_abs_max = sx[j];
}
}
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_S_ZERO : MAGMA_S_ONE;
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
(*scurrent_piv_tx) = tx;
}
else if(rowid == i){
rowid = max_id;
}
current_piv_tx = (*scurrent_piv_tx);
magmablas_syncwarp();
#pragma unroll
for(int j = i; j < N; j++){
y[j] = update * magmablas_sshfl( rA[j], current_piv_tx, NSHFL);
}
reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_S_ONE : MAGMA_S_DIV(MAGMA_S_ONE, y[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * y[j];
}
}
}
// write
if( tx == 0 ){
(*info) = (magma_int_t)linfo;
}
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1);
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
sgetrf_batched_smallsq_shfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as a failed memory allocation.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_sgetrf_batched_smallsq_shfl(
magma_int_t n,
float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0) return 0;
const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n);
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
shmem += ntcol * 1 * sizeof(int);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
switch(m){
case 1:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32:hipLaunchKernelGGL(( sgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
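/*
 * Minimal call sketch (illustrative only; it assumes dA_array, ipiv_array and
 * info_array have already been prepared on the device, e.g. with magma_malloc
 * and magma_setvector, and that a magma_queue_t has been created):
 *
 *   magma_int_t err = magma_sgetrf_batched_smallsq_shfl(
 *                         n, dA_array, ldda,
 *                         ipiv_array, info_array,
 *                         batchCount, queue );
 *
 * A non-zero return flags an illegal argument (here only n outside 0..32);
 * per-matrix failures are reported through info_array instead.
 */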
| 4825c5aee9e029b22e60777cf2f3d61b383e55b2.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Azzam Haidar
@author Ahmad Abdelfattah
@generated from magmablas/zgetrf_batched_smallsq_shfl.cu, normal z -> s, Thu Oct 8 23:05:37 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "sync.cuh"
#include "shuffle.cuh"
#include "batched_kernel_param.h"
// This kernel uses registers for matrix storage, shared mem. and shuffle for communication.
// It also uses lazy swap.
extern __shared__ float ddata[];
template<int N, int NSHFL>
__global__ void
sgetrf_batched_smallsq_shfl_kernel( float** dA_array, int ldda,
magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int batchid = blockIdx.x * blockDim.y + ty;
if(batchid >= batchCount) return;
float* dA = dA_array[batchid];
magma_int_t* ipiv = ipiv_array[batchid];
magma_int_t* info = &info_array[batchid];
float rA[N] = {MAGMA_S_ZERO};
float y[N] = {MAGMA_S_ZERO};
float reg = MAGMA_S_ZERO;
float update = MAGMA_S_ZERO;
int max_id, current_piv_tx, rowid = tx, linfo = 0;
float rx_abs_max = MAGMA_D_ZERO;
// shared memory pointers
float* sx = (float*)(ddata);
int* sipiv = (int*)(sx + blockDim.y * NSHFL);
sx += ty * NSHFL;
sipiv += ty * (NSHFL+1);
volatile int* scurrent_piv_tx = (volatile int*)(sipiv + NSHFL);
// read
if( tx < N ){
#pragma unroll
for(int i = 0; i < N; i++){
rA[i] = dA[ i * ldda + tx ];
}
}
#pragma unroll
for(int i = 0; i < N; i++){
sx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] ));
magmablas_syncwarp();
rx_abs_max = sx[i];
max_id = i;
#pragma unroll
for(int j = i; j < N; j++){
if( sx[j] > rx_abs_max){
max_id = j;
rx_abs_max = sx[j];
}
}
linfo = ( rx_abs_max == MAGMA_D_ZERO && linfo == 0) ? (i+1) : linfo;
update = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_S_ZERO : MAGMA_S_ONE;
if(rowid == max_id){
sipiv[i] = max_id;
rowid = i;
(*scurrent_piv_tx) = tx;
}
else if(rowid == i){
rowid = max_id;
}
current_piv_tx = (*scurrent_piv_tx);
magmablas_syncwarp();
#pragma unroll
for(int j = i; j < N; j++){
y[j] = update * magmablas_sshfl( rA[j], current_piv_tx, NSHFL);
}
reg = ( rx_abs_max == MAGMA_D_ZERO ) ? MAGMA_S_ONE : MAGMA_S_DIV(MAGMA_S_ONE, y[i] );
// scal and ger
if( rowid > i ){
rA[i] *= reg;
#pragma unroll
for(int j = i+1; j < N; j++){
rA[j] -= rA[i] * y[j];
}
}
}
// write
if( tx == 0 ){
(*info) = (magma_int_t)linfo;
}
if(tx < N) {
ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1);
#pragma unroll
for(int i = 0; i < N; i++){
dA[ i * ldda + rowid ] = rA[i];
}
}
}
/***************************************************************************//**
Purpose
-------
sgetrf_batched_smallsq_shfl computes the LU factorization of a square N-by-N matrix A
using partial pivoting with row interchanges.
This routine can deal only with square matrices of size up to 32
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
Arguments
---------
@param[in]
n INTEGER
The size of each matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as a failed memory allocation.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_getrf_batched
*******************************************************************************/
extern "C" magma_int_t
magma_sgetrf_batched_smallsq_shfl(
magma_int_t n,
float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t arginfo = 0;
magma_int_t m = n;
if( (m < 0) || ( m > 32 ) ){
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if( m == 0) return 0;
const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n);
magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int);
shmem += ntcol * magma_ceilpow2(m) * sizeof(float);
shmem += ntcol * 1 * sizeof(int);
dim3 threads(magma_ceilpow2(m), ntcol, 1);
const magma_int_t gridx = magma_ceildiv(batchCount, ntcol);
dim3 grid(gridx, 1, 1);
switch(m){
case 1: sgetrf_batched_smallsq_shfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 2: sgetrf_batched_smallsq_shfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 3: sgetrf_batched_smallsq_shfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 4: sgetrf_batched_smallsq_shfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 5: sgetrf_batched_smallsq_shfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 6: sgetrf_batched_smallsq_shfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 7: sgetrf_batched_smallsq_shfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 8: sgetrf_batched_smallsq_shfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 9: sgetrf_batched_smallsq_shfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 10: sgetrf_batched_smallsq_shfl_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 11: sgetrf_batched_smallsq_shfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 12: sgetrf_batched_smallsq_shfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 13: sgetrf_batched_smallsq_shfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 14: sgetrf_batched_smallsq_shfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 15: sgetrf_batched_smallsq_shfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 16: sgetrf_batched_smallsq_shfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 17: sgetrf_batched_smallsq_shfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 18: sgetrf_batched_smallsq_shfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 19: sgetrf_batched_smallsq_shfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 20: sgetrf_batched_smallsq_shfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 21: sgetrf_batched_smallsq_shfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 22: sgetrf_batched_smallsq_shfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 23: sgetrf_batched_smallsq_shfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 24: sgetrf_batched_smallsq_shfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 25: sgetrf_batched_smallsq_shfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 26: sgetrf_batched_smallsq_shfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 27: sgetrf_batched_smallsq_shfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 28: sgetrf_batched_smallsq_shfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 29: sgetrf_batched_smallsq_shfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 30: sgetrf_batched_smallsq_shfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 31: sgetrf_batched_smallsq_shfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
case 32: sgetrf_batched_smallsq_shfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break;
default: printf("error: size %lld is not supported\n", (long long) m);
}
return arginfo;
}
|
91eb5601f7290f1826248ea5119b921d37e73475.hip | // !!! This is a file automatically generated by hipify!!!
#include "ExpThrustFunctor.hh"
__device__ fptype device_Exp (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype alpha = p[indices[1]];
fptype ret = EXP(alpha*x);
return ret;
}
__device__ fptype device_ExpOffset (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
x -= p[indices[1]]; // the offset is a registered fit parameter; subtract its value, not its index
fptype alpha = p[indices[2]];
fptype ret = EXP(alpha*x);
return ret;
}
__device__ fptype device_ExpPoly (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype exparg = 0;
for (int i = 0; i <= indices[0]; ++i) {
exparg += POW(x, i) * p[indices[i+1]];
}
fptype ret = EXP(exparg);
return ret;
}
__device__ fptype device_ExpPolyOffset (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
x -= p[indices[1]]; // the offset is a registered fit parameter; subtract its value, not its index
fptype exparg = 0;
for (int i = 0; i <= indices[0]; ++i) {
exparg += POW(x, i) * p[indices[i+2]];
}
fptype ret = EXP(exparg);
return ret;
}
__device__ device_function_ptr ptr_to_Exp = device_Exp;
__device__ device_function_ptr ptr_to_ExpPoly = device_ExpPoly;
__device__ device_function_ptr ptr_to_ExpOffset = device_ExpOffset;
__device__ device_function_ptr ptr_to_ExpPolyOffset = device_ExpPolyOffset;
__host__ ExpThrustFunctor::ExpThrustFunctor (std::string n, Variable* _x, Variable* alpha, Variable* offset)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
if (offset) {
pindices.push_back(registerParameter(offset));
pindices.push_back(registerParameter(alpha));
hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpOffset, sizeof(void*));
initialise(pindices);
}
else {
pindices.push_back(registerParameter(alpha));
hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_Exp, sizeof(void*));
initialise(pindices);
}
}
__host__ ExpThrustFunctor::ExpThrustFunctor (std::string n, Variable* _x, std::vector<Variable*>& weights, Variable* offset)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
if (offset) pindices.push_back(registerParameter(offset));
assert(0 < weights.size());
for (std::vector<Variable*>::iterator w = weights.begin(); w != weights.end(); ++w) {
pindices.push_back(registerParameter(*w));
}
if (offset) hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpPolyOffset, sizeof(void*));
else hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpPoly, sizeof(void*));
initialise(pindices);
}
__host__ fptype ExpThrustFunctor::integrate (fptype lo, fptype hi) const {
fptype alpha = host_params[host_indices[parameters + 1]];
if (0 == alpha) {
// This gives a constant 1 all across the range
return (hi - lo);
}
fptype ret = EXP(alpha*hi) - EXP(alpha*lo);
ret /= alpha;
return ret;
}
| 91eb5601f7290f1826248ea5119b921d37e73475.cu | #include "ExpThrustFunctor.hh"
__device__ fptype device_Exp (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype alpha = p[indices[1]];
fptype ret = EXP(alpha*x);
return ret;
}
__device__ fptype device_ExpOffset (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
x -= p[indices[1]]; // the offset is a registered fit parameter; subtract its value, not its index
fptype alpha = p[indices[2]];
fptype ret = EXP(alpha*x);
return ret;
}
__device__ fptype device_ExpPoly (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype exparg = 0;
for (int i = 0; i <= indices[0]; ++i) {
exparg += POW(x, i) * p[indices[i+1]];
}
fptype ret = EXP(exparg);
return ret;
}
__device__ fptype device_ExpPolyOffset (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
x -= p[indices[1]]; // the offset is a registered fit parameter; subtract its value, not its index
fptype exparg = 0;
for (int i = 0; i <= indices[0]; ++i) {
exparg += POW(x, i) * p[indices[i+2]];
}
fptype ret = EXP(exparg);
return ret;
}
__device__ device_function_ptr ptr_to_Exp = device_Exp;
__device__ device_function_ptr ptr_to_ExpPoly = device_ExpPoly;
__device__ device_function_ptr ptr_to_ExpOffset = device_ExpOffset;
__device__ device_function_ptr ptr_to_ExpPolyOffset = device_ExpPolyOffset;
__host__ ExpThrustFunctor::ExpThrustFunctor (std::string n, Variable* _x, Variable* alpha, Variable* offset)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
if (offset) {
pindices.push_back(registerParameter(offset));
pindices.push_back(registerParameter(alpha));
cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpOffset, sizeof(void*));
initialise(pindices);
}
else {
pindices.push_back(registerParameter(alpha));
cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_Exp, sizeof(void*));
initialise(pindices);
}
}
__host__ ExpThrustFunctor::ExpThrustFunctor (std::string n, Variable* _x, std::vector<Variable*>& weights, Variable* offset)
: ThrustPdfFunctor(_x, n)
{
std::vector<unsigned int> pindices;
if (offset) pindices.push_back(registerParameter(offset));
assert(0 < weights.size());
for (std::vector<Variable*>::iterator w = weights.begin(); w != weights.end(); ++w) {
pindices.push_back(registerParameter(*w));
}
if (offset) cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpPolyOffset, sizeof(void*));
else cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ExpPoly, sizeof(void*));
initialise(pindices);
}
__host__ fptype ExpThrustFunctor::integrate (fptype lo, fptype hi) const {
fptype alpha = host_params[host_indices[parameters + 1]];
if (0 == alpha) {
// This gives a constant 1 all across the range
return (hi - lo);
}
fptype ret = EXP(alpha*hi) - EXP(alpha*lo);
ret /= alpha;
return ret;
}
|
548b0cc3155d57eb19ec774bfcaff7f488c0e590.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void myFirstKernel( int *d_a )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
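    // Encode the owning block and thread into each element so the host can verify every element after the copy back.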
d_a[idx] = 1000 * blockIdx.x + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( &d_a, memSize );
// Part 2 of 5: configure and launch kernel
dim3 dimGrid( numBlocks );
dim3 dimBlock( numThreadsPerBlock );
hipLaunchKernelGGL(( myFirstKernel), dim3(numBlocks) , dim3(numThreadsPerBlock) , 0, 0, d_a );
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
hipMemcpy(h_a,d_a,memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("hipMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
for (int i = 0; i < numBlocks ; i++)
{
for (int j = 0; j < numThreadsPerBlock ; j++)
{
assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
}
}
// free device memory
hipFree(d_a);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(-1);
}
}
| 548b0cc3155d57eb19ec774bfcaff7f488c0e590.cu | /*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void myFirstKernel( int *d_a )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
d_a[idx] = 1000 * blockIdx.x + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( &d_a, memSize );
// Part 2 of 5: configure and launch kernel
dim3 dimGrid( numBlocks );
dim3 dimBlock( numThreadsPerBlock );
myFirstKernel<<< numBlocks , numThreadsPerBlock >>>( d_a );
// block until the device has completed
cudaThreadSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
cudaMemcpy(h_a,d_a,memSize, cudaMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
for (int i = 0; i < numBlocks ; i++)
{
for (int j = 0; j < numThreadsPerBlock ; j++)
{
assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
}
}
// free device memory
cudaFree(d_a);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(-1);
}
}
|
d00a3acce83a431d29957a945905b792632bbf79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev/ptr2d/glob.hpp"
using namespace cv::cudev;
void RGB_to_YV12(const GpuMat& src, GpuMat& dst);
namespace
{
__device__ __forceinline__ void rgb_to_y(const uchar b, const uchar g, const uchar r, uchar& y)
{
y = static_cast<uchar>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
__device__ __forceinline__ void rgb_to_yuv(const uchar b, const uchar g, const uchar r, uchar& y, uchar& u, uchar& v)
{
rgb_to_y(b, g, r, y);
u = static_cast<uchar>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uchar>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
__global__ void Gray_to_YV12(const GlobPtrSz<uchar> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
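        // Each thread converts a 2x2 block of source pixels: four luma samples plus one U and one V, matching YV12's 4:2:0 chroma subsampling.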
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uchar> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uchar> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uchar> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uchar pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uchar> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uchar> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uchar> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
T pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
}
void RGB_to_YV12(const GpuMat& src, GpuMat& dst)
{
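    // Each thread of the kernels below handles a 2x2 pixel block, so the grid only needs to cover half the image in each dimension.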
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch (src.channels())
{
case 1:
hipLaunchKernelGGL(( Gray_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar>(src), globPtr<uchar>(dst));
break;
case 3:
hipLaunchKernelGGL(( RGB_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
hipLaunchKernelGGL(( RGB_to_YV12), dim3(grid), dim3(block), 0, 0, globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
CV_CUDEV_SAFE_CALL( hipGetLastError() );
CV_CUDEV_SAFE_CALL( hipDeviceSynchronize() );
}
#endif
| d00a3acce83a431d29957a945905b792632bbf79.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifndef HAVE_OPENCV_CUDEV
#error "opencv_cudev is required"
#else
#include "opencv2/cudev/ptr2d/glob.hpp"
using namespace cv::cudev;
void RGB_to_YV12(const GpuMat& src, GpuMat& dst);
namespace
{
__device__ __forceinline__ void rgb_to_y(const uchar b, const uchar g, const uchar r, uchar& y)
{
y = static_cast<uchar>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
__device__ __forceinline__ void rgb_to_yuv(const uchar b, const uchar g, const uchar r, uchar& y, uchar& u, uchar& v)
{
rgb_to_y(b, g, r, y);
u = static_cast<uchar>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uchar>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
__global__ void Gray_to_YV12(const GlobPtrSz<uchar> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uchar> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uchar> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uchar> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
uchar pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix, pix, pix, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix, pix, pix, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
template <typename T>
__global__ void RGB_to_YV12(const GlobPtrSz<T> src, GlobPtr<uchar> dst)
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
if (x + 1 >= src.cols || y + 1 >= src.rows)
return;
// get pointers to the data
const size_t planeSize = src.rows * dst.step;
GlobPtr<uchar> y_plane = globPtr(dst.data, dst.step);
GlobPtr<uchar> u_plane = globPtr(y_plane.data + planeSize, dst.step / 2);
GlobPtr<uchar> v_plane = globPtr(u_plane.data + (planeSize / 4), dst.step / 2);
T pix;
uchar y_val, u_val, v_val;
pix = src(y, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x) = y_val;
pix = src(y, x + 1);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y, x + 1) = y_val;
pix = src(y + 1, x);
rgb_to_y(pix.z, pix.y, pix.x, y_val);
y_plane(y + 1, x) = y_val;
pix = src(y + 1, x + 1);
rgb_to_yuv(pix.z, pix.y, pix.x, y_val, u_val, v_val);
y_plane(y + 1, x + 1) = y_val;
u_plane(y / 2, x / 2) = u_val;
v_plane(y / 2, x / 2) = v_val;
}
}
void RGB_to_YV12(const GpuMat& src, GpuMat& dst)
{
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x * 2), divUp(src.rows, block.y * 2));
switch (src.channels())
{
case 1:
Gray_to_YV12<<<grid, block>>>(globPtr<uchar>(src), globPtr<uchar>(dst));
break;
case 3:
RGB_to_YV12<<<grid, block>>>(globPtr<uchar3>(src), globPtr<uchar>(dst));
break;
case 4:
RGB_to_YV12<<<grid, block>>>(globPtr<uchar4>(src), globPtr<uchar>(dst));
break;
}
CV_CUDEV_SAFE_CALL( cudaGetLastError() );
CV_CUDEV_SAFE_CALL( cudaDeviceSynchronize() );
}
#endif
|
761998b4dd84687531fb14bdb0c48b184c5c54e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mshadow/tensor.h>
#include "./reorg-inl.h"
#include "../mxnet_op.h"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template<typename DType>
__global__ void ReorgForwardKernel(
int n, const DType* data,
int data_height, int data_width, int out_height, int out_width,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w,
DType* out) {
// each kernel is for each sample n == C * s * s * H' * W'
CUDA_KERNEL_LOOP(index, n) {
int out_w = index % out_width;
int out_h = index / out_width % out_height;
int s_w = index / out_width / out_height % kernel_w;
int s_h = index / out_width / out_height / kernel_w % kernel_h;
int ch = index / out_width / out_height / kernel_w / kernel_h;
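    // Flat index decomposed as (ch, s_h, s_w, out_h, out_w): the output is laid out C x kH x kW x H' x W', with out_w fastest.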
int data_w = out_w * stride_w - pad_w + s_w * dilate_w;
int data_h = out_h * stride_h - pad_h + s_h * dilate_h;
int data_dim = data_height * data_width;
// int out_dim = kernel_h * kernel_w * out_height * out_width;
out[index] =
(0 <= data_h && data_h < data_height && 0 <= data_w && data_w < data_width) ?
      data[ch * data_dim + data_h * data_width + data_w] : static_cast<DType>(0);
}
}
template<typename DType>
__global__ void ReorgBackwardKernel(
int n, const DType* dOut,
int data_height, int data_width, int out_height, int out_width,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w,
DType* dData) {
// each kernel is for each data item n == C * H * W
CUDA_KERNEL_LOOP(index, n) {
int data_w = index % data_width;
int data_h = index / data_width % data_height;
int ch = index /data_width / data_height;
DType val = static_cast<DType>(0);
for (int s_h = 0; s_h < kernel_h; ++s_h) {
for (int s_w = 0; s_w < kernel_w; ++s_w) {
int x_prime = data_w + pad_w - s_w * dilate_w;
int y_prime = data_h + pad_h - s_h * dilate_h;
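        // This input pixel is touched by output (out_h, out_w) through tap (s_h, s_w) only if the forward mapping lands exactly here, i.e. x_prime and y_prime are non-negative multiples of the stride.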
if (x_prime >= 0 && x_prime % stride_w == 0 &&
y_prime >= 0 && y_prime % stride_h == 0) {
int out_w = x_prime / stride_w;
int out_h = y_prime / stride_h;
if (out_w < out_width && out_h < out_height)
val += dOut[(((ch * kernel_h + s_h) * kernel_w + s_w) * out_height + out_h) * out_width + out_w];
}
}
}
dData[index] += val;
}
}
} // namespace cuda
template<typename DType>
inline void ReorgForward(
const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &out,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w) {
using namespace mxnet::op::mxnet_op;
// get the shape info
int num = data.shape_[0];
int channels = data.shape_[1];
int data_height = data.shape_[2];
int data_width = data.shape_[3];
// calculate the out_height and out_width
int out_width = ((data_width + 2 * pad_w) - (kernel_w - 1) * dilate_w) / stride_w;
int out_height = ((data_height + 2 * pad_h) - (kernel_h - 1) * dilate_h) / stride_h;
int num_threads = channels * kernel_h * kernel_w * out_height * out_width;
for (int n = 0; n < num; ++n) {
Tensor<gpu, 3, DType> input_3d = data[n];
Tensor<gpu, 3, DType> output_3d = out[n];
hipLaunchKernelGGL(( cuda::ReorgForwardKernel<DType>)
, dim3(cuda_get_num_blocks(num_threads)), dim3(cuda::kBaseThreadNum),
0, Stream<gpu>::GetStream(output_3d.stream_),
num_threads, input_3d.dptr_, data_height, data_width, out_height, out_width,
kernel_h, kernel_w, stride_h, stride_w, dilate_h, dilate_w, pad_h, pad_w, output_3d.dptr_);
MSHADOW_CUDA_POST_KERNEL_CHECK(ReorgForwardKernel);
}
}
template<typename DType>
inline void ReorgBackward(
const Tensor<gpu, 4, DType> dData, const Tensor<gpu, 4, DType> dOut,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w) {
using namespace mxnet::op::mxnet_op;
// get the shape info
int num = dData.shape_[0];
int channels = dData.shape_[1];
int data_height = dData.shape_[2];
int data_width = dData.shape_[3];
// calculate the out_height and out_width
int out_height = dOut.shape_[2];
int out_width = dOut.shape_[3];
int num_threads = channels * data_height * data_width;
for (int n = 0; n < num; ++n) {
Tensor<gpu, 3, DType> ingrad_3d = dData[n];
Tensor<gpu, 3, DType> outgrad_3d = dOut[n];
hipLaunchKernelGGL(( cuda::ReorgBackwardKernel<DType>)
, dim3(cuda_get_num_blocks(num_threads)), dim3(cuda::kBaseThreadNum),
0, Stream<gpu>::GetStream(ingrad_3d.stream_),
num_threads, outgrad_3d.dptr_, data_height, data_width, out_height, out_width,
kernel_h, kernel_w, stride_h, stride_w, dilate_h, dilate_w, pad_h, pad_w, ingrad_3d.dptr_);
MSHADOW_CUDA_POST_KERNEL_CHECK(ReorgBackwardKernel);
}
}
} // namespace mshadow
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(Reorg)
.set_attr<FCompute>("FCompute<gpu>", ReorgCompute<gpu>);
NNVM_REGISTER_OP(_backward_Reorg)
.set_attr<FCompute>("FCompute<gpu>", ReorgGradCompute<gpu>);
}
}
| 761998b4dd84687531fb14bdb0c48b184c5c54e2.cu | #include <mshadow/tensor.h>
#include "./reorg-inl.h"
#include "../mxnet_op.h"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template<typename DType>
__global__ void ReorgForwardKernel(
int n, const DType* data,
int data_height, int data_width, int out_height, int out_width,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w,
DType* out) {
// each kernel is for each sample n == C * s * s * H' * W'
CUDA_KERNEL_LOOP(index, n) {
int out_w = index % out_width;
int out_h = index / out_width % out_height;
int s_w = index / out_width / out_height % kernel_w;
int s_h = index / out_width / out_height / kernel_w % kernel_h;
int ch = index / out_width / out_height / kernel_w / kernel_h;
int data_w = out_w * stride_w - pad_w + s_w * dilate_w;
int data_h = out_h * stride_h - pad_h + s_h * dilate_h;
int data_dim = data_height * data_width;
// int out_dim = kernel_h * kernel_w * out_height * out_width;
out[index] =
(0 <= data_h && data_h < data_height && 0 <= data_w && data_w < data_width) ?
      data[ch * data_dim + data_h * data_width + data_w] : static_cast<DType>(0);
}
}
template<typename DType>
__global__ void ReorgBackwardKernel(
int n, const DType* dOut,
int data_height, int data_width, int out_height, int out_width,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w,
DType* dData) {
// each kernel is for each data item n == C * H * W
CUDA_KERNEL_LOOP(index, n) {
int data_w = index % data_width;
int data_h = index / data_width % data_height;
int ch = index /data_width / data_height;
DType val = static_cast<DType>(0);
for (int s_h = 0; s_h < kernel_h; ++s_h) {
for (int s_w = 0; s_w < kernel_w; ++s_w) {
int x_prime = data_w + pad_w - s_w * dilate_w;
int y_prime = data_h + pad_h - s_h * dilate_h;
if (x_prime >= 0 && x_prime % stride_w == 0 &&
y_prime >= 0 && y_prime % stride_h == 0) {
int out_w = x_prime / stride_w;
int out_h = y_prime / stride_h;
if (out_w < out_width && out_h < out_height)
val += dOut[(((ch * kernel_h + s_h) * kernel_w + s_w) * out_height + out_h) * out_width + out_w];
}
}
}
dData[index] += val;
}
}
} // namespace cuda
template<typename DType>
inline void ReorgForward(
const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 4, DType> &out,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w) {
using namespace mxnet::op::mxnet_op;
// get the shape info
int num = data.shape_[0];
int channels = data.shape_[1];
int data_height = data.shape_[2];
int data_width = data.shape_[3];
// calculate the out_height and out_width
int out_width = ((data_width + 2 * pad_w) - (kernel_w - 1) * dilate_w) / stride_w;
int out_height = ((data_height + 2 * pad_h) - (kernel_h - 1) * dilate_h) / stride_h;
int num_threads = channels * kernel_h * kernel_w * out_height * out_width;
for (int n = 0; n < num; ++n) {
Tensor<gpu, 3, DType> input_3d = data[n];
Tensor<gpu, 3, DType> output_3d = out[n];
cuda::ReorgForwardKernel<DType>
<<<cuda_get_num_blocks(num_threads), cuda::kBaseThreadNum,
0, Stream<gpu>::GetStream(output_3d.stream_)>>>(
num_threads, input_3d.dptr_, data_height, data_width, out_height, out_width,
kernel_h, kernel_w, stride_h, stride_w, dilate_h, dilate_w, pad_h, pad_w, output_3d.dptr_);
MSHADOW_CUDA_POST_KERNEL_CHECK(ReorgForwardKernel);
}
}
template<typename DType>
inline void ReorgBackward(
const Tensor<gpu, 4, DType> dData, const Tensor<gpu, 4, DType> dOut,
int kernel_h, int kernel_w, int stride_h, int stride_w,
int dilate_h, int dilate_w, int pad_h, int pad_w) {
using namespace mxnet::op::mxnet_op;
// get the shape info
int num = dData.shape_[0];
int channels = dData.shape_[1];
int data_height = dData.shape_[2];
int data_width = dData.shape_[3];
// calculate the out_height and out_width
int out_height = dOut.shape_[2];
int out_width = dOut.shape_[3];
int num_threads = channels * data_height * data_width;
for (int n = 0; n < num; ++n) {
Tensor<gpu, 3, DType> ingrad_3d = dData[n];
Tensor<gpu, 3, DType> outgrad_3d = dOut[n];
cuda::ReorgBackwardKernel<DType>
<<<cuda_get_num_blocks(num_threads), cuda::kBaseThreadNum,
0, Stream<gpu>::GetStream(ingrad_3d.stream_)>>>(
num_threads, outgrad_3d.dptr_, data_height, data_width, out_height, out_width,
kernel_h, kernel_w, stride_h, stride_w, dilate_h, dilate_w, pad_h, pad_w, ingrad_3d.dptr_);
MSHADOW_CUDA_POST_KERNEL_CHECK(ReorgBackwardKernel);
}
}
} // namespace mshadow
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(Reorg)
.set_attr<FCompute>("FCompute<gpu>", ReorgCompute<gpu>);
NNVM_REGISTER_OP(_backward_Reorg)
.set_attr<FCompute>("FCompute<gpu>", ReorgGradCompute<gpu>);
}
}
|
5542fe5f4b5401887a487114cb3063eec2862fe8.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <cmath>
#include <ATen/native/quantized/hip/fake_quantize_core.h>
/* FakeQuantize Op for PerTensorAffine quantization scheme */
namespace at {
namespace native {
/* Fake-quantizes the 'inputs' tensor.
Args:
self: Forward input tensor.
scale: scale of per tensor affine quantization
zero_point: zero_point of per tensor affine quantization
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
  Fake-quantized tensor (float dtype).
*/
Tensor fake_quantize_per_tensor_affine_cuda(
const Tensor& self,
double scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(self.scalar_type() == ScalarType::Float);
TORCH_CHECK(
quant_min <= quant_max,
"`quant_min` should be less than or \
equal to `quant_max`.");
TORCH_CHECK(
zero_point >= quant_min && zero_point <= quant_max,
"`zero_point` must be between `quant_min` and `quant_max`.");
auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);
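  // fake_quantize_slice_cuda (declared in fake_quantize_core.h) fills Y elementwise with, essentially, (clamp(round(X / scale + zero_point), quant_min, quant_max) - zero_point) * scale.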
fake_quantize_slice_cuda(Y, self, scale, zero_point, quant_min, quant_max);
return Y;
}
/* Backward path to fake-quantize the 'inputs' tensor.
Args:
dY: Backward input tensor.
X: Forward input tensor.
scale: scale of per tensor affine quantization
zero_point: zero_point of per tensor affine quantization
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
  Gradient with respect to X (float dtype).
*/
Tensor fake_quantize_per_tensor_affine_backward_cuda(
const Tensor& dY,
const Tensor& X,
double scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
TORCH_CHECK(dY.is_cuda());
TORCH_CHECK(dY.scalar_type() == ScalarType::Float);
TORCH_CHECK(X.is_cuda());
TORCH_CHECK(X.scalar_type() == ScalarType::Float);
TORCH_CHECK(X.numel() == dY.numel(), "`X` and `dY` are not the same size");
TORCH_CHECK(
quant_min <= quant_max,
"`quant_min` should be less than or \
equal to `quant_max`.");
TORCH_CHECK(
zero_point >= quant_min && zero_point <= quant_max,
"`zero_point` must be between `quant_min` and `quant_max`.");
if (X.numel() <= 0) {
return X;
}
auto dX = at::empty_like(X, X.options(), MemoryFormat::Preserve);
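  // fake_quantize_grad_slice_cuda implements the straight-through estimator: dY is passed through where round(X / scale + zero_point) stays inside [quant_min, quant_max] and zeroed elsewhere.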
fake_quantize_grad_slice_cuda(
dX, X, dY, scale, zero_point, quant_min, quant_max);
return dX;
}
} // namespace native
} // namespace at
| 5542fe5f4b5401887a487114cb3063eec2862fe8.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <cmath>
#include <ATen/native/quantized/cuda/fake_quantize_core.h>
/* FakeQuantize Op for PerTensorAffine quantization scheme */
namespace at {
namespace native {
/* Fake-quantizes the 'inputs' tensor.
Args:
self: Forward input tensor.
scale: scale of per tensor affine quantization
zero_point: zero_point of per tensor affine quantization
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
  Fake-quantized tensor (float dtype).
*/
Tensor fake_quantize_per_tensor_affine_cuda(
const Tensor& self,
double scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(self.scalar_type() == ScalarType::Float);
TORCH_CHECK(
quant_min <= quant_max,
"`quant_min` should be less than or \
equal to `quant_max`.");
TORCH_CHECK(
zero_point >= quant_min && zero_point <= quant_max,
"`zero_point` must be between `quant_min` and `quant_max`.");
auto Y = at::empty_like(self, self.options(), MemoryFormat::Preserve);
fake_quantize_slice_cuda(Y, self, scale, zero_point, quant_min, quant_max);
return Y;
}
/* Backward path to fake-quantize the 'inputs' tensor.
Args:
dY: Backward input tensor.
X: Forward input tensor.
scale: scale of per tensor affine quantization
zero_point: zero_point of per tensor affine quantization
quant_min: minimum quantized value
quant_max: maximum quantized value
Returns:
  Gradient with respect to X (float dtype).
*/
Tensor fake_quantize_per_tensor_affine_backward_cuda(
const Tensor& dY,
const Tensor& X,
double scale,
int64_t zero_point,
int64_t quant_min,
int64_t quant_max) {
TORCH_CHECK(dY.is_cuda());
TORCH_CHECK(dY.scalar_type() == ScalarType::Float);
TORCH_CHECK(X.is_cuda());
TORCH_CHECK(X.scalar_type() == ScalarType::Float);
TORCH_CHECK(X.numel() == dY.numel(), "`X` and `dY` are not the same size");
TORCH_CHECK(
quant_min <= quant_max,
"`quant_min` should be less than or \
equal to `quant_max`.");
TORCH_CHECK(
zero_point >= quant_min && zero_point <= quant_max,
"`zero_point` must be between `quant_min` and `quant_max`.");
if (X.numel() <= 0) {
return X;
}
auto dX = at::empty_like(X, X.options(), MemoryFormat::Preserve);
fake_quantize_grad_slice_cuda(
dX, X, dY, scale, zero_point, quant_min, quant_max);
return dX;
}
} // namespace native
} // namespace at
|
516f70bc9840e9cca0a1826efc3d9f973f90ee98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "common.h"
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col,
float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize_h * ksize_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize_h; ++i) {
for (int j = 0; j < ksize_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[i * width + j] : 0;
data_col += height_col * width_col;
}
}
}
}
void im2col(hipStream_t stream, const float* data_im, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w, float* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - ksize_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ksize_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// Launch
hipLaunchKernelGGL(( im2col_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_im, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_col
);
}
static int stcunn_StochasticSpatialConvolution_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
// Input
THCudaTensor *input_mu = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *input_var = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params:
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *weight2 = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight2", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *mu = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "mu", "torch.CudaTensor");
THCudaTensor *var = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "var", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 9, input_mu, input_var, mu, var, weight, weight2, bias, columns, ones));
luaL_argcheck(L, input_mu->nDimension == 3 || input_mu->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input_mu->nDimension == 3) {
    luaL_argcheck(L, input_mu->size[0] == nInputPlane, 2, "input channels and nInputPlane don't match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input_mu, 1, input_mu->size[0], input_mu->size[1], input_mu->size[2]);
THCudaTensor_resize4d(state, input_var, 1, input_var->size[0], input_var->size[1], input_var->size[2]);
} else {
    luaL_argcheck(L, input_mu->size[1] == nInputPlane, 2, "input channels and nInputPlane don't match");
}
long inputWidth = input_mu->size[3];
long inputHeight = input_mu->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
// Batch size + input planes
long batchSize = input_mu->size[0];
// Resize output
THCudaTensor_resize4d(state, mu, batchSize, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize4d(state, var, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_mu_n = THCudaTensor_new(state);
THCudaTensor *input_var_n = THCudaTensor_new(state);
THCudaTensor *output_mu_n = THCudaTensor_new(state);
THCudaTensor *output_var_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_mu_n, input_mu, 0, elt);
THCudaTensor_select(state, input_var_n, input_var, 0, elt);
THCudaTensor_select(state, output_mu_n, mu, 0, elt);
THCudaTensor_select(state, output_var_n, var, 0, elt);
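    // Moment propagation through the convolution: mu_out = W * im2col(mu_in) + bias and var_out = (elementwise-squared W) * im2col(var_in); weight2 presumably holds the elementwise square of weight, prepared by the Lua wrapper.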
// var
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_var_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padding, padding, dH, dW,
THCudaTensor_data(state, columns)
);
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1];
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight2), k,
0,
THCudaTensor_data(state, output_var_n), n
);
// mu
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_mu_n), n_
);
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_mu_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padding, padding, dH, dW,
THCudaTensor_data(state, columns)
);
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_mu_n), n
);
}
// Free
THCudaTensor_free(state, input_mu_n);
THCudaTensor_free(state, input_var_n);
THCudaTensor_free(state, output_mu_n);
THCudaTensor_free(state, output_var_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, mu, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, var, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input_mu, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, input_var, nInputPlane, inputHeight, inputWidth);
}
// return output
return 2;
}
static const struct luaL_Reg stcunn_StochasticSpatialConvolution__ [] = {
{"StochasticSpatialConvolution_updateOutput", stcunn_StochasticSpatialConvolution_updateOutput},
{NULL, NULL}
};
static void stcunn_StochasticSpatialConvolution_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, stcunn_StochasticSpatialConvolution__, "nn");
lua_pop(L,1);
}
| 516f70bc9840e9cca0a1826efc3d9f973f90ee98.cu | #include "utils.h"
#include "common.h"
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col,
float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize_h * ksize_w;
int h_in = h_out * stride_h - pad_h;
int w_in = w_out * stride_w - pad_w;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize_h; ++i) {
for (int j = 0; j < ksize_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[i * width + j] : 0;
data_col += height_col * width_col;
}
}
}
}
void im2col(cudaStream_t stream, const float* data_im, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w, float* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h - ksize_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - ksize_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// Launch
im2col_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>> (
num_kernels, data_im, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w,
height_col, width_col, data_col
);
}
static int stcunn_StochasticSpatialConvolution_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
// Input
THCudaTensor *input_mu = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *input_var = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params:
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *weight2 = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight2", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *mu = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "mu", "torch.CudaTensor");
THCudaTensor *var = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "var", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 9, input_mu, input_var, mu, var, weight, weight2, bias, columns, ones));
luaL_argcheck(L, input_mu->nDimension == 3 || input_mu->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input_mu->nDimension == 3) {
    luaL_argcheck(L, input_mu->size[0] == nInputPlane, 2, "input channels and nInputPlane don't match");
// Force batch
batch = 0;
THCudaTensor_resize4d(state, input_mu, 1, input_mu->size[0], input_mu->size[1], input_mu->size[2]);
THCudaTensor_resize4d(state, input_var, 1, input_var->size[0], input_var->size[1], input_var->size[2]);
} else {
    luaL_argcheck(L, input_mu->size[1] == nInputPlane, 2, "input channels and nInputPlane don't match");
}
long inputWidth = input_mu->size[3];
long inputHeight = input_mu->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
// Batch size + input planes
long batchSize = input_mu->size[0];
// Resize output
THCudaTensor_resize4d(state, mu, batchSize, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize4d(state, var, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(state, ones, outputHeight, outputWidth);
THCudaTensor_fill(state, ones, 1);
}
// Helpers
THCudaTensor *input_mu_n = THCudaTensor_new(state);
THCudaTensor *input_var_n = THCudaTensor_new(state);
THCudaTensor *output_mu_n = THCudaTensor_new(state);
THCudaTensor *output_var_n = THCudaTensor_new(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(state, input_mu_n, input_mu, 0, elt);
THCudaTensor_select(state, input_var_n, input_var, 0, elt);
THCudaTensor_select(state, output_mu_n, mu, 0, elt);
THCudaTensor_select(state, output_var_n, var, 0, elt);
// var
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_var_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padding, padding, dH, dW,
THCudaTensor_data(state, columns)
);
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1];
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight2), k,
0,
THCudaTensor_data(state, output_var_n), n
);
// mu
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
THCudaBlas_gemm(
state,
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(state, ones), k_,
THCudaTensor_data(state, bias), k_,
0,
THCudaTensor_data(state, output_mu_n), n_
);
im2col(
THCState_getCurrentStream(state),
THCudaTensor_data(state, input_mu_n),
nInputPlane, inputHeight, inputWidth, kH, kW, padding, padding, dH, dW,
THCudaTensor_data(state, columns)
);
THCudaBlas_gemm(
state,
'n', 'n',
n, m, k,
1,
THCudaTensor_data(state, columns), n,
THCudaTensor_data(state, weight), k,
1,
THCudaTensor_data(state, output_mu_n), n
);
}
// Free
THCudaTensor_free(state, input_mu_n);
THCudaTensor_free(state, input_var_n);
THCudaTensor_free(state, output_mu_n);
THCudaTensor_free(state, output_var_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(state, mu, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, var, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(state, input_mu, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(state, input_var, nInputPlane, inputHeight, inputWidth);
}
// return output
return 2;
}
static const struct luaL_Reg stcunn_StochasticSpatialConvolution__ [] = {
{"StochasticSpatialConvolution_updateOutput", stcunn_StochasticSpatialConvolution_updateOutput},
{NULL, NULL}
};
static void stcunn_StochasticSpatialConvolution_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, stcunn_StochasticSpatialConvolution__, "nn");
lua_pop(L,1);
}
|
8e753019e2c58b07d9ce4917549fb0175c4c308e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/hip/HIPContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THH/THHGeneral.h>
#include "THH/THHNumerics.cuh"
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) (int)::floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)::ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
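// For output index a of b along a dimension with c inputs, the adaptive window is [START_IND, END_IND) = [floor(a*c/b), ceil((a+1)*c/b)), so the windows tile the input and adjacent ones overlap by at most one element.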
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
namespace at {
namespace native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
template <typename T>
__global__ void adaptiveaveragepool(T *input, T *output,
int isizeH, int isizeW,
int osizeH, int osizeW,
int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
// iterators on output pixels
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
output = output + o_plane*osizeH*osizeW;
input = input + i_plane*istrideD;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
const int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
const int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling over corresponding input pixels
T *ptr_input = input + istartH*istrideH + istartW*istrideW;
T *ptr_output = output + oh*osizeW + ow;
T sum = ScalarConvert<int, T>::to(0);
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[iw*istrideW];
sum += val;
}
ptr_input += istrideH; // next input line
}
// Update output
*ptr_output = sum / kH / kW;
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
template <typename T>
__global__ void adaptiveaveragegradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on input pixels
int ih, iw;
// select input/output plane based on thread/block ID
int i_plane = blockIdx.x;
int o_plane = i_plane;
gradOutput = gradOutput + o_plane*osizeH*osizeW;
gradInput = gradInput + i_plane*isizeH*isizeW;
int istartH = blockDim.y*blockIdx.y + threadIdx.y;
int iendH = isizeH;
int istepH = blockDim.y*gridDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// compute gradInput
for(ih = istartH; ih < iendH; ih += istepH) {
int ostartH = START_IND(ih, isizeH, osizeH);
int oendH = END_IND(ih, isizeH, osizeH);
for(iw = istartW; iw < iendW; iw += istepW) {
int ostartW = START_IND(iw, isizeW, osizeW);
int oendW = END_IND(iw, isizeW, osizeW);
// Compute the gradients over corresponding output pixels
T *ptr_gradInput = gradInput + ih*isizeW + iw;
int oh, ow;
for(oh = ostartH; oh < oendH; ++oh) {
int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH);
for(ow = ostartW; ow < oendW; ++ow) {
int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW);
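          // kH and kW are computed as (start - end) and are therefore negative; the two sign flips cancel in grad_delta below, leaving gradOutput divided by the true window area.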
T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
*ptr_gradInput += grad_delta;
}
}
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
template <typename T>
__global__ void atomicadaptiveaveragegradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on output indices
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
gradOutput = gradOutput + o_plane*osizeW*osizeH;
gradInput = gradInput + i_plane*isizeW*isizeH;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients for over corresponding input pixels
T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
T grad_delta = *ptr_gradOutput / kW / kH;
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
// atomic add since different threads could update same variable
atomicAdd(&(ptr_gradInput[iw]), grad_delta);
}
ptr_gradInput += isizeW; // next input line
}
}
}
}
// 4d tensor B x D x H x W
void adaptive_avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
TensorArg input_arg{ input, "input", 1 },
output_arg{ output, "output", 2 };
checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg});
for (int64_t i = 0; i < input.ndimension(); i++) {
TORCH_CHECK(input.size(i) > 0,
"adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i, " being "
"empty");
}
TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
Tensor input_ = input;
int64_t grid_x = input.size(-3);
if (input.ndimension() == 4) {
input_ = input.contiguous();
grid_x *= input_.size(-4);
}
int64_t sizeD = input_.size(-3);
int64_t isizeH = input_.size(-2);
int64_t isizeW = input_.size(-1);
int64_t istrideD = input_.stride(-3);
int64_t istrideH = input_.stride(-2);
int64_t istrideW = input_.stride(-1);
int64_t osizeH = output_size[0];
int64_t osizeW = output_size[1];
if (input.ndimension() == 4) {
output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
} else {
output.resize_({sizeD, osizeH, osizeW});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
scalar_t *input_data = input_.data<scalar_t>();
scalar_t *output_data = output.data<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
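        // grid.x enumerates the collapsed (batch, channel) planes; each 32x8 block strides over one plane's output, with blocksH extra blocks along the height when there are only a few planes.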
// run averagepool kernel
hipLaunchKernelGGL(( adaptiveaveragepool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data, output_data,
isizeH, isizeW, osizeH, osizeW,
istrideD, istrideH, istrideW);
}
);
// THCudaCheck(hipGetLastError());
}
void adaptive_avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input)
{
TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
grad_output_arg{ gradOutput_, "gradOutput_", 2 },
input_arg{ input, "input", 3 };
checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out",
{grad_input_arg, grad_output_arg, input_arg});
bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
Tensor gradOutput = gradOutput_.contiguous();
int64_t sizeD = input.size(-3);
int64_t isizeH = input.size(-2);
int64_t isizeW = input.size(-1);
int64_t osizeH = gradOutput.size(-2);
int64_t osizeW = gradOutput.size(-1);
int64_t grid_x = sizeD;
if (input.ndimension() == 4) grid_x *= input.size(-4);
//bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data<scalar_t>();
// cuda blocks & threads:
int blocksH = ::max((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomicadaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
}
else
{
// run updateGradInput kernel
hipLaunchKernelGGL(( adaptiveaveragegradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
}
}
);
// THCudaCheck(hipGetLastError());
}
} // namespace
Tensor& adaptive_avg_pool2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor adaptive_avg_pool2d_cuda(
at::Tensor const& input,
IntArrayRef output_size)
{
auto output = at::empty({0}, input.options());
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input)
{
gradInput.resize_as_(input);
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
return gradInput;
}
Tensor adaptive_avg_pool2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input)
{
auto gradInput = at::zeros_like(input);
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
return gradInput;
}
} // at::native
} // at
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
| 8e753019e2c58b07d9ce4917549fb0175c4c308e.cu | #include "ATen/ATen.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/NativeFunctions.h"
#include "ATen/TensorUtils.h"
#include "ATen/Utils.h"
#include "c10/util/Exception.h"
#include <THC/THCGeneral.h>
#include "THC/THCNumerics.cuh"
#include <algorithm>
#include <cfloat>
#include <cmath>
#define START_IND(a,b,c) (int)std::floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)std::ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
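// Example: with isizeH = 10 and osizeH = 3 these macros map output rows 0, 1, 2
// to the (overlapping) input ranges [0,4), [3,7) and [6,10), so every input row
// is covered and neighbouring output bins may share input rows.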
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
namespace at {
namespace native {
namespace {
// 4d tensor B x D x H x W
// All kernels view batch dim B and feature dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output
*/
template <typename T>
__global__ void adaptiveaveragepool(T *input, T *output,
int isizeH, int isizeW,
int osizeH, int osizeW,
int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
// iterators on output pixels
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
output = output + o_plane*osizeH*osizeW;
input = input + i_plane*istrideD;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
const int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
const int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling over corresponding input pixels
T *ptr_input = input + istartH*istrideH + istartW*istrideW;
T *ptr_output = output + oh*osizeW + ow;
T sum = ScalarConvert<int, T>::to(0);
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
T val = ptr_input[iw*istrideW];
sum += val;
}
ptr_input += istrideH; // next input line
}
// Update output
*ptr_output = sum / kH / kW;
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
*/
template <typename T>
__global__ void adaptiveaveragegradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on input pixels
int ih, iw;
// select input/output plane based on thread/block ID
int i_plane = blockIdx.x;
int o_plane = i_plane;
gradOutput = gradOutput + o_plane*osizeH*osizeW;
gradInput = gradInput + i_plane*isizeH*isizeW;
int istartH = blockDim.y*blockIdx.y + threadIdx.y;
int iendH = isizeH;
int istepH = blockDim.y*gridDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// compute gradInput
for(ih = istartH; ih < iendH; ih += istepH) {
int ostartH = START_IND(ih, isizeH, osizeH);
int oendH = END_IND(ih, isizeH, osizeH);
for(iw = istartW; iw < iendW; iw += istepW) {
int ostartW = START_IND(iw, isizeW, osizeW);
int oendW = END_IND(iw, isizeW, osizeW);
// Compute the gradients over corresponding output pixels
T *ptr_gradInput = gradInput + ih*isizeW + iw;
int oh, ow;
for(oh = ostartH; oh < oendH; ++oh) {
int kH = START_IND(oh, osizeH, isizeH) - END_IND(oh, osizeH, isizeH);
for(ow = ostartW; ow < oendW; ++ow) {
int kW = START_IND(ow, osizeW, isizeW) - END_IND(ow, osizeW, isizeW);
T grad_delta = gradOutput[ow + oh*osizeW] / kH / kW;
*ptr_gradInput += grad_delta;
}
}
}
}
}
/*
* Description:
* this function computes the gradInput from gradOutput
* (uses atomic add)
*/
template <typename T>
__global__ void atomicadaptiveaveragegradinput(
T *gradInput, T *gradOutput,
int isizeH, int isizeW, int osizeH, int osizeW
)
{
// iterators on output indices
int oh, ow;
// select input/output plane based on thread/block ID
int o_plane = blockIdx.x;
int i_plane = o_plane;
gradOutput = gradOutput + o_plane*osizeW*osizeH;
gradInput = gradInput + i_plane*isizeW*isizeH;
int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
int oendH = osizeH;
int ostepH = blockDim.y*gridDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// For all output pixels...
for(oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = START_IND(oh, osizeH, isizeH);
int iendH = END_IND(oh, osizeH, isizeH);
int kH = iendH - istartH;
for(ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = START_IND(ow, osizeW, isizeW);
int iendW = END_IND(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients over the corresponding input pixels
T *ptr_gradInput = gradInput + istartH*isizeW + istartW;
T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
T grad_delta = *ptr_gradOutput / kW / kH;
int ih, iw;
for(ih = 0; ih < kH; ++ih) {
for(iw = 0; iw < kW; ++iw) {
// atomic add since different threads could update same variable
atomicAdd(&(ptr_gradInput[iw]), grad_delta);
}
ptr_gradInput += isizeW; // next input line
}
}
}
}
// 4d tensor B x D x H x W
void adaptive_avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
TensorArg input_arg{ input, "input", 1 },
output_arg{ output, "output", 2 };
checkAllSameGPU("cudnn_adaptive_avg_pooling2d", {input_arg, output_arg});
for (int64_t i = 0; i < input.ndimension(); i++) {
TORCH_CHECK(input.size(i) > 0,
"adaptive_avg_pooling2d(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i, " being "
"empty");
}
TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
Tensor input_ = input;
int64_t grid_x = input.size(-3);
if (input.ndimension() == 4) {
input_ = input.contiguous();
grid_x *= input_.size(-4);
}
int64_t sizeD = input_.size(-3);
int64_t isizeH = input_.size(-2);
int64_t isizeW = input_.size(-1);
int64_t istrideD = input_.stride(-3);
int64_t istrideH = input_.stride(-2);
int64_t istrideW = input_.stride(-1);
int64_t osizeH = output_size[0];
int64_t osizeW = output_size[1];
if (input.ndimension() == 4) {
output.resize_({input_.size(-4), sizeD, osizeH, osizeW});
} else {
output.resize_({sizeD, osizeH, osizeW});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input_.scalar_type(), "adaptive_avg_pool2d_cuda", [&] {
scalar_t *input_data = input_.data<scalar_t>();
scalar_t *output_data = output.data<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max<int64_t>((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
// run averagepool kernel
adaptiveaveragepool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
input_data, output_data,
isizeH, isizeW, osizeH, osizeW,
istrideD, istrideH, istrideW);
}
);
// THCudaCheck(cudaGetLastError());
}
void adaptive_avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input)
{
TensorArg grad_input_arg{ gradInput, "gradInput", 1 },
grad_output_arg{ gradOutput_, "gradOutput_", 2 },
input_arg{ input, "input", 3 };
checkAllSameGPU("cudnn_adaptive_avg_pooling2d_out",
{grad_input_arg, grad_output_arg, input_arg});
bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
Tensor gradOutput = gradOutput_.contiguous();
int64_t sizeD = input.size(-3);
int64_t isizeH = input.size(-2);
int64_t isizeW = input.size(-1);
int64_t osizeH = gradOutput.size(-2);
int64_t osizeW = gradOutput.size(-1);
int64_t grid_x = sizeD;
if (input.ndimension() == 4) grid_x *= input.size(-4);
//bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0);
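// When isize is not a multiple of osize the input bins of neighbouring output
// elements overlap (see START_IND/END_IND above), so several threads of the
// per-output backward kernel may update the same gradInput element and atomic
// accumulation is needed; the commented-out test would enable atomics only in
// that case, while the hard-coded atomic = true above is the conservative choice.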
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "adaptive_avg_pool2d_backward_cuda", [&] {
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data<scalar_t>();
// cuda blocks & threads:
int blocksH = std::max((int)(16L / sizeD), 1);
dim3 blocks(grid_x, blocksH);
dim3 threads(32, 8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
atomicadaptiveaveragegradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
}
else
{
// run updateGradInput kernel
adaptiveaveragegradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> (
gradInput_data, gradOutput_data,
isizeH, isizeW, osizeH, osizeW);
}
}
);
// THCudaCheck(cudaGetLastError());
}
} // namespace
Tensor& adaptive_avg_pool2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size)
{
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor adaptive_avg_pool2d_cuda(
at::Tensor const& input,
IntArrayRef output_size)
{
auto output = at::empty({0}, input.options());
adaptive_avg_pool2d_out_cuda_template(
output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input)
{
gradInput.resize_as_(input);
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
return gradInput;
}
Tensor adaptive_avg_pool2d_backward_cuda(
const Tensor& gradOutput,
const Tensor& input)
{
auto gradInput = at::zeros_like(input);
adaptive_avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput, input);
return gradInput;
}
} // at::native
} // at
#undef CUDA_MAX_THREADS
#undef START_IND
#undef END_IND
|
b5da4933e7aa607b54510ec3d5db639918c571b2.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef COMMON_CU__
#define COMMON_CU__ 1
//typedef unsigned long uint32_t;
// Children are labeled as ACGT$
//#include <stdint.h>
#include<hip/hip_vector_types.h>
const int basecount = 5;
// Note: max pixel size is 16 bytes
const unsigned char DNA_A = 'A';
const unsigned char DNA_C = 'B';
const unsigned char DNA_G = 'C';
const unsigned char DNA_T = 'D';
const unsigned char DNA_S = 'E';
// 4 bytes
struct TextureAddress
{
union
{
unsigned int data;
struct
{
unsigned short x;
unsigned short y;
};
};
};
// Store the start, end coordinate of node, and $link in 1 pixel
struct PixelOfNode
{
union
{
ulong4 data;
struct
{
int start;
int end;
TextureAddress childD;
TextureAddress suffix;
};
};
};
// Store the ACGT links in 1 pixel
struct PixelOfChildren
{
union
{
ulong4 data;
TextureAddress children[4];
};
};
#define FORWARD 0x0000
#define REVERSE 0x8000
#define FRMASK 0x8000
#define FRUMASK 0x7FFF
#endif
| b5da4933e7aa607b54510ec3d5db639918c571b2.cu | #ifndef COMMON_CU__
#define COMMON_CU__ 1
//typedef unsigned long uint32_t;
// Children are labeled as ACGT$
//#include <stdint.h>
#include<vector_types.h>
const int basecount = 5;
// Note: max pixel size is 16 bytes
const unsigned char DNA_A = 'A';
const unsigned char DNA_C = 'B';
const unsigned char DNA_G = 'C';
const unsigned char DNA_T = 'D';
const unsigned char DNA_S = 'E';
// 4 bytes
struct TextureAddress
{
union
{
unsigned int data;
struct
{
unsigned short x;
unsigned short y;
};
};
};
// Store the start, end coordinate of node, and $link in 1 pixel
struct PixelOfNode
{
union
{
ulong4 data;
struct
{
int start;
int end;
TextureAddress childD;
TextureAddress suffix;
};
};
};
// Store the ACGT links in 1 pixel
struct PixelOfChildren
{
union
{
ulong4 data;
TextureAddress children[4];
};
};
#define FORWARD 0x0000
#define REVERSE 0x8000
#define FRMASK 0x8000
#define FRUMASK 0x7FFF
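// The forward/reverse flag appears to live in the top bit of a 16-bit texture
// coordinate: OR with FRMASK (or REVERSE) to mark the reverse strand and AND
// with FRUMASK to recover the plain coordinate, e.g. (assumed usage, not part
// of the original file): addr.x |= REVERSE; unsigned short plain = addr.x & FRUMASK;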
#endif
|
5ae6214d352eb1ae91ea4a3c6240d02bacb115e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include "makebmp.h"
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include <cutil.h>
#include <helper_timer.h>
#include <rayTracing_kernel.cu>
#define PI 3.141592654f
#define Angle(a) ((a*PI)/180.0)
int g_verbose;
int t = 1;
class Observateur
{
private:
matrice3x4 M; // U, V, W
float df; // focal distance
public:
Observateur( );
Observateur(const float3 &, const float3 &, const float3 &, double );
inline const matrice3x4 & getMatrice( ) const { return M; }
inline float getDistance( ) const { return df; }
};
Observateur::Observateur()
{
M.m[0] = make_float4(0.0f,0.0f,1.0f,0.0f);
M.m[1] = make_float4(0.0f,1.0f,0.0f,0.0f);
M.m[2] = make_float4(1.0f,0.0f,0.0f,0.0f);
df = 1.0 / tan(Angle(65)/2.0);
}
Observateur::Observateur(const float3 & p, const float3 & u, const float3 & v, double a )
{
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U,VP)*U);
W = normalize(cross(U,V));
M.m[0] = make_float4(U.x,U.y,U.z,p.x);
M.m[1] = make_float4(V.x,V.y,V.z,p.y);
M.m[2] = make_float4(W.x,W.y,W.z,p.z);
df = 1.0 / tan(Angle(a)/2.0);
}
Observateur obs = Observateur(make_float3(0.0f,0.5f,2.0f),normalize(make_float3(0.0f,0.0f,0.0f)-make_float3(0.0f,0.5f,2.0f)),make_float3(0.0f,1.0f,0.0f),65.0f);
#include <rayTracing_kernel.cu>
unsigned width = 64; //640; //512; //16; //32; //512;
unsigned height = 64; //480; //512; //16;//512;
dim3 blockSize(16,8);
dim3 gridSize(width/blockSize.x, height/blockSize.y);
StopWatchInterface *timer=NULL;
uint * c_output, * d_output;
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void initPixelBuffer()
{
//int num = width * height;
//float phi = 2.0f/(float)min(width,height);
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
};
// Render the image with CUDA
void render(Object** objList, int n)
{
sdkStartTimer(&timer);
//render<<<gridSize, blockSize>>>(d_output, objList, width, height, obs.getDistance(), n);
hipLaunchKernelGGL(( render) , dim3(gridSize), dim3(blockSize), 0, 0, d_output, objList, width, height, obs.getDistance(), n);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
sdkStopTimer(&timer);
CUDA_SAFE_CALL( hipMemcpy( c_output, d_output, width*height*sizeof(uint), hipMemcpyDeviceToHost) );
unsigned long long int checksum = 0;
for (int y=(height-1); y >= 0; y--){
if (g_verbose) printf("\n");
for (int x=0; x< width; x++) {
if (g_verbose) printf("%010u ", (unsigned) c_output[x+y*width]);
checksum += c_output[x+y*width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
}
// Display the result with OpenGL
void display(Object** objList, int n)
{
// Display the result
render(objList, n);
printf("Kernel Time: %f \n", sdkGetTimerValue(&timer));
t--;
if (!t) {
return;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// initialise card and timer
int deviceCount;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem,atoll(argv[4]));
CUDA_SAFE_CALL(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
else
CUDA_SAFE_CALL(hipSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 5) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i=5; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n %d ", argc);
printf("where WIDTH and HEIGHT are the screen dimensions and -v is used to display an abstract representation of the output.\n");
return 1;
}
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
initialize_bmp(width,height,32);
Object** objList;
int n=atoi(argv[3]);
float* A;
float* d_A;
A = (float *)malloc(n * 8 * sizeof(float));
d_A = (float *)my_obj_alloc.calloc<float>(n * 8);
srand(47);
A[0] = 0.0f; A[1] = 1.0f; A[2] = 1.0f; A[3] = 1.0f; A[4] = 0.0f; A[5] = -1.5f; A[6] = -0.0f; A[7] = 0.5f;
A[8] = 1.0f; A[8 + 1] = 0.0f; A[8 + 2] = 0.0f; A[8 + 3] = 1.0f; A[8 + 4] = -1.0f; A[8 + 5] = 0.0f; A[8 + 6] = -1.0f; A[8 + 7] = 0.5f;
A[16] = 0.0f; A[16 + 1] = 0.0f; A[16 + 2] = 1.0f; A[16 + 3] = 1.0f; A[16 + 4] = 1.0f; A[16 + 5] = -0.0f; A[16 + 6] = -1.0f; A[16 + 7] = 0.5f;
A[24] = 0.0f; A[24 + 1] = 1.0f; A[24 + 2] = 0.0f; A[24 + 3] = 1.0f; A[24 + 4] = 0.0f; A[24 + 5] = -0.0f; A[24 + 6] = -2.0f; A[24 + 7] = 0.75f;
for( int i(4); i < n; i++ ) {
float r,v,b;
float tmp1(5.0f*((r=(float(rand()%255)/255.0f)))-2.5f);
float tmp2(5.0f*((v=(float(rand()%255)/255.0f)))-2.5f);
float tmp3(-5.0f*((b=(float(rand()%255)/255.0f))));
float tmp4((rand()%100)/100.0f);
A[i * 8 + 4] = tmp1; A[i * 8 + 5] = tmp2; A[i * 8 + 6] = tmp3; A[i * 8 + 7] = tmp4;
A[i * 8] = r; A[i * 8 + 1] = v; A[i * 8 + 2] = b; A[i * 8 + 3] = 1.0f;
}
hipMemcpy(d_A, A, n * 8 * sizeof(float), hipMemcpyHostToDevice);
objList = (Object **)my_obj_alloc.calloc<Object *>(n);
int threadsPerBlock = 256;
int blocksPerGrid =(n + threadsPerBlock - 1) / threadsPerBlock;
initObject(objList, d_A, n,&my_obj_alloc);
hipLaunchKernelGGL(( initObject_kern), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, objList, d_A, n);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
hipDeviceSynchronize();
c_output = (uint*) calloc(width*height, sizeof(uint));
CUDA_SAFE_CALL( hipMalloc( (void**)&d_output, width*height*sizeof(uint)));
CUDA_SAFE_CALL( hipMemcpyToSymbol(MView, (void*)&obs, 3*sizeof(float4)) );
initPixelBuffer();
display(objList, n);
create_bmp(c_output);
sdkDeleteTimer(&timer);
return 0;
}
| 5ae6214d352eb1ae91ea4a3c6240d02bacb115e9.cu | /*
 * Copyright 2008 BOROUJERDI Maxime. All rights reserved.
*/
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include "makebmp.h"
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include <cutil.h>
#include <helper_timer.h>
#include <rayTracing_kernel.cu>
#define PI 3.141592654f
#define Angle(a) ((a*PI)/180.0)
int g_verbose;
int t = 1;
class Observateur
{
private:
matrice3x4 M; // U, V, W
float df; // focal distance
public:
Observateur( );
Observateur(const float3 &, const float3 &, const float3 &, double );
inline const matrice3x4 & getMatrice( ) const { return M; }
inline float getDistance( ) const { return df; }
};
Observateur::Observateur()
{
M.m[0] = make_float4(0.0f,0.0f,1.0f,0.0f);
M.m[1] = make_float4(0.0f,1.0f,0.0f,0.0f);
M.m[2] = make_float4(1.0f,0.0f,0.0f,0.0f);
df = 1.0 / tan(Angle(65)/2.0);
}
Observateur::Observateur(const float3 & p, const float3 & u, const float3 & v, double a )
{
float3 VP, U, V, W;
VP = normalize(v);
U = normalize(u);
V = normalize(VP - dot(U,VP)*U);
W = normalize(cross(U,V));
M.m[0] = make_float4(U.x,U.y,U.z,p.x);
M.m[1] = make_float4(V.x,V.y,V.z,p.y);
M.m[2] = make_float4(W.x,W.y,W.z,p.z);
df = 1.0 / tan(Angle(a)/2.0);
}
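// The constructor above builds an orthonormal camera basis from the eye position p,
// the viewing direction u and the up hint v: U is the normalized view direction,
// V is v with its component along U removed (Gram-Schmidt), W = U x V completes the
// frame, and the rows of M carry the basis vectors plus p as translation. df = 1/tan(a/2)
// is the focal distance for a unit image plane, with the field of view a given in degrees.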
Observateur obs = Observateur(make_float3(0.0f,0.5f,2.0f),normalize(make_float3(0.0f,0.0f,0.0f)-make_float3(0.0f,0.5f,2.0f)),make_float3(0.0f,1.0f,0.0f),65.0f);
#include <rayTracing_kernel.cu>
unsigned width = 64; //640; //512; //16; //32; //512;
unsigned height = 64; //480; //512; //16;//512;
dim3 blockSize(16,8);
dim3 gridSize(width/blockSize.x, height/blockSize.y);
StopWatchInterface *timer=NULL;
uint * c_output, * d_output;
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void initPixelBuffer()
{
//int num = width * height;
//float phi = 2.0f/(float)min(width,height);
gridSize = dim3(iDivUp(width, blockSize.x), iDivUp(height, blockSize.y));
};
// Render the image with CUDA
void render(Object** objList, int n)
{
sdkStartTimer(&timer);
//render<<<gridSize, blockSize>>>(d_output, objList, width, height, obs.getDistance(), n);
render <<<gridSize, blockSize>>>(d_output, objList, width, height, obs.getDistance(), n);
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
sdkStopTimer(&timer);
CUDA_SAFE_CALL( cudaMemcpy( c_output, d_output, width*height*sizeof(uint), cudaMemcpyDeviceToHost) );
unsigned long long int checksum = 0;
for (int y=(height-1); y >= 0; y--){
if (g_verbose) printf("\n");
for (int x=0; x< width; x++) {
if (g_verbose) printf("%010u ", (unsigned) c_output[x+y*width]);
checksum += c_output[x+y*width];
}
}
printf("\n");
printf("checksum=%llx\n", checksum);
}
// Display the result with OpenGL
void display(Object** objList, int n)
{
// Display the result
render(objList, n);
printf("Kernel Time: %f \n", sdkGetTimerValue(&timer));
t--;
if (!t) {
return;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// initialise card and timer
int deviceCount;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
obj_alloc my_obj_alloc(&shared_mem,atoll(argv[4]));
CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev));
if (deviceProp.major >= 1)
break;
}
if (dev == deviceCount) {
fprintf(stderr, "There is no device supporting CUDA.\n");
exit(EXIT_FAILURE);
}
else
CUDA_SAFE_CALL(cudaSetDevice(dev));
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 5) {
width = atoi(argv[1]);
height = atoi(argv[2]);
for (i=5; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !width || !height) {
printf("Usage: ./rayTracing <WIDTH> <HEIGHT> [-v]\n %d ", argc);
printf("where WIDTH and HEIGHT are the screen dimensions and -v is used to display an abstract representation of the output.\n");
return 1;
}
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
initialize_bmp(width,height,32);
Object** objList;
int n=atoi(argv[3]);
float* A;
float* d_A;
A = (float *)malloc(n * 8 * sizeof(float));
d_A = (float *)my_obj_alloc.calloc<float>(n * 8);
srand(47);
A[0] = 0.0f; A[1] = 1.0f; A[2] = 1.0f; A[3] = 1.0f; A[4] = 0.0f; A[5] = -1.5f; A[6] = -0.0f; A[7] = 0.5f;
A[8] = 1.0f; A[8 + 1] = 0.0f; A[8 + 2] = 0.0f; A[8 + 3] = 1.0f; A[8 + 4] = -1.0f; A[8 + 5] = 0.0f; A[8 + 6] = -1.0f; A[8 + 7] = 0.5f;
A[16] = 0.0f; A[16 + 1] = 0.0f; A[16 + 2] = 1.0f; A[16 + 3] = 1.0f; A[16 + 4] = 1.0f; A[16 + 5] = -0.0f; A[16 + 6] = -1.0f; A[16 + 7] = 0.5f;
A[24] = 0.0f; A[24 + 1] = 1.0f; A[24 + 2] = 0.0f; A[24 + 3] = 1.0f; A[24 + 4] = 0.0f; A[24 + 5] = -0.0f; A[24 + 6] = -2.0f; A[24 + 7] = 0.75f;
for( int i(4); i < n; i++ ) {
float r,v,b;
float tmp1(5.0f*((r=(float(rand()%255)/255.0f)))-2.5f);
float tmp2(5.0f*((v=(float(rand()%255)/255.0f)))-2.5f);
float tmp3(-5.0f*((b=(float(rand()%255)/255.0f))));
float tmp4((rand()%100)/100.0f);
A[i * 8 + 4] = tmp1; A[i * 8 + 5] = tmp2; A[i * 8 + 6] = tmp3; A[i * 8 + 7] = tmp4;
A[i * 8] = r; A[i * 8 + 1] = v; A[i * 8 + 2] = b; A[i * 8 + 3] = 1.0f;
}
cudaMemcpy(d_A, A, n * 8 * sizeof(float), cudaMemcpyHostToDevice);
objList = (Object **)my_obj_alloc.calloc<Object *>(n);
int threadsPerBlock = 256;
int blocksPerGrid =(n + threadsPerBlock - 1) / threadsPerBlock;
initObject(objList, d_A, n,&my_obj_alloc);
initObject_kern<<<blocksPerGrid, threadsPerBlock>>>(objList, d_A, n);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
cudaDeviceSynchronize();
c_output = (uint*) calloc(width*height, sizeof(uint));
CUDA_SAFE_CALL( cudaMalloc( (void**)&d_output, width*height*sizeof(uint)));
CUDA_SAFE_CALL( cudaMemcpyToSymbol(MView, (void*)&obs, 3*sizeof(float4)) );
initPixelBuffer();
display(objList, n);
create_bmp(c_output);
sdkDeleteTimer(&timer);
return 0;
}
|
4cda9ab8b710bfa94cfe6d56998f12540e5d6699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <limits.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <omp.h>
void MMScan(float ***X, float ***Y, long start, long end, long size) {
long n, i, j, k;
for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) {
Y[start][i][j] = X[start][i][j];
}
}
#ifdef FAUX // incorrect parallelization
#pragma omp parallel for
#endif // incorrect parallelization
for (n = start+1; n <= end; ++n) {
for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) {
float acc = 0;
for (k = 0; k < size; ++k) {
acc = acc + Y[n-1][i][k] * X[n][k][j];
}
Y[n][i][j] = acc;
}
}
}
}
__global__
void phase1(float *x, float *r1, long N, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
int g = blockIdx.x; // Contains the thread block within the grid
int G = gridDim.x; // Contains the dimensions of the grid as specified by numThreadBlocks
int n = N / G;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *x_temp = x;
float *r1_temp = r1;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
float *temp = (float*)&shared[2*B*B];
// Read into shared A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = x_temp[(g*n)*B*B + j*B + k];
}
}
for (int i = 1; i < n; ++i) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = x_temp[(g*n+i)*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = temp
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
temp[j*B + k] = acc;
}
}
// Copy temp back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = temp[j*B + k];
}
}
}
__syncthreads();
// Copy back to R1
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
r1_temp[g*B*B + j*B + k] = temp[j*B + k];
}
}
}
__global__
void phase2(float *r1, float *r2, long G, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *r1_temp = r1;
float *r2_temp = r2;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
// Read into shared A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
r2_temp[1*B*B + j*B + k] = A[j*B + k] = r1_temp[0 + j*B + k];
}
}
for (int i = 2; i < G; ++i) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = r1_temp[(i-1)*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = r2[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
r2_temp[i*B*B + j*B + k] = acc;
}
}
__syncthreads();
// Copy r2[i] back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = r2_temp[i*B*B + j*B + k];
}
}
}
}
__global__
void phase3(float *x, float *r2, float *y, long N, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
int g = blockIdx.x; // Contains the thread block within the grid
int G = gridDim.x; // Contains the dimensions of the grid as specified by numThreadBlocks
int n = N / G;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *x_temp = x;
float *r2_temp = r2;
float *y_temp = y;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
float *T = (float*)&shared[2*B*B];
// Read each element of r2 into the full block of y
for (int i = n*g; i < n*g + n; ++i) {
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
y_temp[i*B*B + j*B + k] = r2_temp[g*B*B + j*B + k];
}
}
}
__syncthreads();
for (int yi = n*g; yi < n*g + n; ++yi) {
__syncthreads();
// Read into shared A, X[0]xR2[0]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
if (g == 0) {
A[j*B + k] = x_temp[0 + j*B + k];
} else {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += r2_temp[g*B*B + a*B + k] * x_temp[g*n*B*B + j*B + a];
}
A[j*B + k] = acc;
}
}
}
for (int xi = n*g + 1; xi < yi + 1; ++xi) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = x_temp[xi*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = y[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
T[j*B + k] = acc;
}
}
// Copy T back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = T[j*B + k];
}
}
}
// Copy A back to y[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
y_temp[yi*B*B + j*B + k] = A[j*B + k];
}
}
}
}
| 4cda9ab8b710bfa94cfe6d56998f12540e5d6699.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <limits.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <omp.h>
void MMScan(float ***X, float ***Y, long start, long end, long size) {
long n, i, j, k;
for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) {
Y[start][i][j] = X[start][i][j];
}
}
#ifdef FAUX // incorrect parallelization
#pragma omp parallel for
#endif // incorrect parallelization
for (n = start+1; n <= end; ++n) {
for (i = 0; i < size; ++i) {
for (j = 0; j < size; ++j) {
float acc = 0;
for (k = 0; k < size; ++k) {
acc = acc + Y[n-1][i][k] * X[n][k][j];
}
Y[n][i][j] = acc;
}
}
}
}
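// The three kernels below reproduce the MMScan prefix product of B x B matrices
// as a block-wise GPU scan: phase1 has each of the G thread blocks reduce its
// contiguous chunk of N/G matrices to one partial product in r1, phase2 uses a
// single block to turn r1 into exclusive prefix products r2, and phase3 combines
// each chunk's running local products with its prefix from r2 to write the final
// result y. Every kernel expects two to three B*B float buffers of dynamically
// allocated shared memory, sized by the caller at launch time.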
__global__
void phase1(float *x, float *r1, long N, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
int g = blockIdx.x; // Contains the thread block within the grid
int G = gridDim.x; // Contains the dimensions of the grid as specified by numThreadBlocks
int n = N / G;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *x_temp = x;
float *r1_temp = r1;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
float *temp = (float*)&shared[2*B*B];
// Read into shared A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = x_temp[(g*n)*B*B + j*B + k];
}
}
for (int i = 1; i < n; ++i) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = x_temp[(g*n+i)*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = temp
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
temp[j*B + k] = acc;
}
}
// Copy temp back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = temp[j*B + k];
}
}
}
__syncthreads();
// Copy back to R1
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
r1_temp[g*B*B + j*B + k] = temp[j*B + k];
}
}
}
__global__
void phase2(float *r1, float *r2, long G, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *r1_temp = r1;
float *r2_temp = r2;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
// Read into shared A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
r2_temp[1*B*B + j*B + k] = A[j*B + k] = r1_temp[0 + j*B + k];
}
}
for (int i = 2; i < G; ++i) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = r1_temp[(i-1)*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = r2[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
r2_temp[i*B*B + j*B + k] = acc;
}
}
__syncthreads();
// Copy r2[i] back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = r2_temp[i*B*B + j*B + k];
}
}
}
}
__global__
void phase3(float *x, float *r2, float *y, long N, long B) {
int threadDim = blockDim.x; // Contains the dimensions of each thread block as specified by numThreadsInThreadBlocks
int threadRow = threadIdx.x; // Contains the index of the thread within its thread block
int threadCol = threadIdx.y;
int g = blockIdx.x; // Contains the thread block within the grid
int G = gridDim.x; // Contains the dimensions of the grid as specified by numThreadBlocks
int n = N / G;
// Read the matrix into shared memory.
extern __shared__ float shared[];
float *x_temp = x;
float *r2_temp = r2;
float *y_temp = y;
float *A = (float*)&shared[0];
float *C = (float*)&shared[B*B];
float *T = (float*)&shared[2*B*B];
// Read each element of r2 into the full block of y
for (int i = n*g; i < n*g + n; ++i) {
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
y_temp[i*B*B + j*B + k] = r2_temp[g*B*B + j*B + k];
}
}
}
__syncthreads();
for (int yi = n*g; yi < n*g + n; ++yi) {
__syncthreads();
// Read into shared A, X[0]xR2[0]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
if (g == 0) {
A[j*B + k] = x_temp[0 + j*B + k];
} else {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += r2_temp[g*B*B + a*B + k] * x_temp[g*n*B*B + j*B + a];
}
A[j*B + k] = acc;
}
}
}
for (int xi = n*g + 1; xi < yi + 1; ++xi) {
// Read into shared C
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
C[j*B + k] = x_temp[xi*B*B + j*B + k];
}
}
__syncthreads();
// Multiply AxC = y[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
float acc = 0.0f;
for (int a = 0; a < B; ++a) {
acc += A[a*B + k] * C[j*B + a];
}
T[j*B + k] = acc;
}
}
// Copy T back to A
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
A[j*B + k] = T[j*B + k];
}
}
}
// Copy A back to y[i]
for (int j = threadRow*threadDim; j < threadRow*threadDim + threadDim; ++j) {
for (int k = threadCol*threadDim; k < threadCol*threadDim + threadDim; ++k) {
y_temp[yi*B*B + j*B + k] = A[j*B + k];
}
}
}
}
|
0f3c32bf826483c385d766a85650fb8df87f9414.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 19-Oct-2012 16:21:04
//
// user function
__device__
#include "updateUR.h"
// CUDA kernel function
__global__ void op_cuda_updateUR(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
const double *arg4,
int offset_s,
int set_size ) {
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
// user-supplied kernel call
updateUR( arg0+n,
arg1+n,
arg2+n,
arg3+n,
arg4 );
}
}
// host stub function
void op_par_loop_updateUR(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: updateUR\n");
}
op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(5);
OP_kernels[5].name = name;
OP_kernels[5].count += 1;
if (set->size >0) {
op_timers_core(&cpu_t1, &wall_t1);
// transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for (int d=0; d<1; d++) ((double *)arg4.data)[d] = arg4h[d];
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_5
int nthread = OP_BLOCK_SIZE_5;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// work out shared memory requirements per element
int nshared = 0;
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = nshared*nthread;
hipLaunchKernelGGL(( op_cuda_updateUR), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_updateUR execution failed\n");
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[5].time += wall_t2 - wall_t1;
OP_kernels[5].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[5].transfer += (float)set->size * arg2.size;
OP_kernels[5].transfer += (float)set->size * arg3.size * 2.0f;
}
| 0f3c32bf826483c385d766a85650fb8df87f9414.cu | //
// auto-generated by op2.m on 19-Oct-2012 16:21:04
//
// user function
__device__
#include "updateUR.h"
// CUDA kernel function
__global__ void op_cuda_updateUR(
double *arg0,
double *arg1,
double *arg2,
double *arg3,
const double *arg4,
int offset_s,
int set_size ) {
// process set elements
for (int n=threadIdx.x+blockIdx.x*blockDim.x;
n<set_size; n+=blockDim.x*gridDim.x) {
// user-supplied kernel call
updateUR( arg0+n,
arg1+n,
arg2+n,
arg3+n,
arg4 );
}
}
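// Note on the generated stub below: the kernel above walks the whole set with a
// grid-stride loop, so the fixed launch of 200 blocks x 128 threads covers sets
// of any size, and the single global constant arg4 is staged into the shared
// OP_consts_h / OP_consts_d buffers and passed to the kernel by pointer.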
// host stub function
void op_par_loop_updateUR(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4 ){
double *arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: updateUR\n");
}
op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(5);
OP_kernels[5].name = name;
OP_kernels[5].count += 1;
if (set->size >0) {
op_timers_core(&cpu_t1, &wall_t1);
// transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(double));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for (int d=0; d<1; d++) ((double *)arg4.data)[d] = arg4h[d];
consts_bytes += ROUND_UP(1*sizeof(double));
mvConstArraysToDevice(consts_bytes);
// set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_5
int nthread = OP_BLOCK_SIZE_5;
#else
// int nthread = OP_block_size;
int nthread = 128;
#endif
int nblocks = 200;
// work out shared memory requirements per element
int nshared = 0;
// execute plan
int offset_s = nshared*OP_WARPSIZE;
nshared = nshared*nthread;
op_cuda_updateUR<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
offset_s,
set->size );
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg("op_cuda_updateUR execution failed\n");
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[5].time += wall_t2 - wall_t1;
OP_kernels[5].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[5].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[5].transfer += (float)set->size * arg2.size;
OP_kernels[5].transfer += (float)set->size * arg3.size * 2.0f;
}
|
476a429b7b1652251f34306dedd57f6834ad16cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int satisfies(int i, int j, int *A, int *B)
{
return (A[i] <= B[j]);
}
__global__ void MergePath(int *A, int *B, int* C, int *x, int *y, int n)
{
int num_of_threads = blockDim.x;
int idx = threadIdx.x;
bool flag = false;
if (idx == 0)
{
x[idx] = 0;
y[idx] = 0;
flag = true;
}
int A_start = idx*(2 * n) / num_of_threads; //only when len(A)==len(B)
int B_start = max(0, A_start - (n - 1));
A_start = min(n - 1, A_start);
int length_of_array;
if (B_start == 0)
{
length_of_array = A_start + 1;
}
else
length_of_array = n - B_start;
int left = 0, right = length_of_array - 1;
// cout<<A_start<<" "<<B_start<<" "<<length_of_array<<endl<<"-------------------------------------------\n";
while (left <= right && !flag)
{
// cout<<left<<" "<<right<<endl;
int mid = left + (right - left) / 2;
int I = A_start - mid;
int J = B_start + mid;
if (!satisfies(I, J, A, B))
{
left = mid + 1;
}
else
{
if (J == 0)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else if (I == n - 1)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
if (!satisfies(I + 1, J - 1, A, B))
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
right = mid;
}
}
}
}
left--;
if (!flag)
{
x[idx] = (A_start - left);
y[idx] = (n);
}
__syncthreads();
int end_x, end_y;
if (idx == num_of_threads - 1)
{
end_x = n;
end_y = n;
}
else
{
end_x = x[idx + 1];
end_y = y[idx + 1];
}
int cur_x = x[idx];
int cur_y = y[idx];
int put_at = cur_x + cur_y;
while (cur_x<end_x && cur_y<end_y)
{
if (A[cur_x] <= B[cur_y])
{
C[put_at++] = A[cur_x++];
}
else
{
C[put_at++] = B[cur_y++];
}
}
while (cur_x<end_x)
C[put_at++] = A[cur_x++];
while (cur_y<end_y)
C[put_at++] = B[cur_y++];
} | 476a429b7b1652251f34306dedd57f6834ad16cc.cu | #include "includes.h"
__device__ int satisfies(int i, int j, int *A, int *B)
{
return (A[i] <= B[j]);
}
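// MergePath partitions the merge of two sorted arrays A and B (each of length n)
// across the threads of one block: every thread binary-searches its cross diagonal
// of the merge grid for the point where the merge path crosses it, records that
// split in x[] and y[], and then serially merges its slice into C. A minimal
// host-side sketch (buffer names assumed, not part of this file):
// MergePath<<<1, T>>>(dA, dB, dC, dX, dY, n); // dX and dY hold T ints each, dC holds 2*n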
__global__ void MergePath(int *A, int *B, int* C, int *x, int *y, int n)
{
int num_of_threads = blockDim.x;
int idx = threadIdx.x;
bool flag = false;
if (idx == 0)
{
x[idx] = 0;
y[idx] = 0;
flag = true;
}
int A_start = idx*(2 * n) / num_of_threads; //only when len(A)==len(B)
int B_start = max(0, A_start - (n - 1));
A_start = min(n - 1, A_start);
int length_of_array;
if (B_start == 0)
{
length_of_array = A_start + 1;
}
else
length_of_array = n - B_start;
int left = 0, right = length_of_array - 1;
// cout<<A_start<<" "<<B_start<<" "<<length_of_array<<endl<<"-------------------------------------------\n";
while (left <= right && !flag)
{
// cout<<left<<" "<<right<<endl;
int mid = left + (right - left) / 2;
int I = A_start - mid;
int J = B_start + mid;
if (!satisfies(I, J, A, B))
{
left = mid + 1;
}
else
{
if (J == 0)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else if (I == n - 1)
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
if (!satisfies(I + 1, J - 1, A, B))
{
x[idx] = (I + 1);
y[idx] = (J);
flag = true;
}
else
{
right = mid;
}
}
}
}
left--;
if (!flag)
{
x[idx] = (A_start - left);
y[idx] = (n);
}
__syncthreads();
int end_x, end_y;
if (idx == num_of_threads - 1)
{
end_x = n;
end_y = n;
}
else
{
end_x = x[idx + 1];
end_y = y[idx + 1];
}
int cur_x = x[idx];
int cur_y = y[idx];
int put_at = cur_x + cur_y;
while (cur_x<end_x && cur_y<end_y)
{
if (A[cur_x] <= B[cur_y])
{
C[put_at++] = A[cur_x++];
}
else
{
C[put_at++] = B[cur_y++];
}
}
while (cur_x<end_x)
C[put_at++] = A[cur_x++];
while (cur_y<end_y)
C[put_at++] = B[cur_y++];
} |
8dda1e9d1694c07e0f75253b84edcb73a0ed7620.hip | // !!! This is a file automatically generated by hipify!!!
#include "copy.h"
#include <hip/hip_runtime.h>
#include "error.h"
namespace cutw
{
namespace detail
{
void copy_to_device_impl(const void* const host, void* const device,
const std::size_t size, Stream* const s)
{
if (s)
{
CUTW_CUASSERT(hipMemcpyAsync(device, host, size, hipMemcpyHostToDevice, s->get()));
}
else
{
CUTW_CUASSERT(hipMemcpy(device, host, size, hipMemcpyHostToDevice));
}
}
void copy_to_host_impl(void* const host, const void* const device,
const std::size_t size, Stream* const s)
{
if (s)
{
CUTW_CUASSERT(hipMemcpyAsync(host, device, size, hipMemcpyDeviceToHost, s->get()));
}
else
{
CUTW_CUASSERT(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
}
}
}
}
| 8dda1e9d1694c07e0f75253b84edcb73a0ed7620.cu | #include "copy.h"
#include <cuda.h>
#include "error.h"
namespace cutw
{
namespace detail
{
void copy_to_device_impl(const void* const host, void* const device,
const std::size_t size, Stream* const s)
{
if (s)
{
CUTW_CUASSERT(cudaMemcpyAsync(device, host, size, cudaMemcpyHostToDevice, s->get()));
}
else
{
CUTW_CUASSERT(cudaMemcpy(device, host, size, cudaMemcpyHostToDevice));
}
}
void copy_to_host_impl(void* const host, const void* const device,
const std::size_t size, Stream* const s)
{
if (s)
{
CUTW_CUASSERT(cudaMemcpyAsync(host, device, size, cudaMemcpyDeviceToHost, s->get()));
}
else
{
CUTW_CUASSERT(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost));
}
}
}
}
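// Usage note: when a Stream is supplied the transfers go through cudaMemcpyAsync
// and only achieve real host/device overlap if the host buffer is page-locked
// (pinned); with pageable host memory the runtime falls back to a staged, largely
// synchronous copy, so the async path is worthwhile mainly for pinned buffers.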
|
1d2df63a15df92dcd49044b607664c6d31896108.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
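// Example invocations (flag names taken from the parsing code in runTest() below;
// the --key=value form assumed here is the one used by the sample's helper headers):
//   ./bandwidthTest --memory=pinned --mode=quick
//   ./bandwidthTest --device=all --htod --dtoh --csv
//   ./bandwidthTest --mode=range --start=1048576 --end=16777216 --increment=1048576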
// CUDA runtime
#include <hip/hip_runtime.h>
// includes
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
#include <hip/hip_runtime.h>
#include <memory>
#include <iostream>
#include <cassert>
static char *sSDKsample = "CUDA Bandwidth Test";
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode {
QUICK_MODE, RANGE_MODE, SHMOO_MODE
};
enum memcpyKind {
DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE
};
enum printMode {
USER_READABLE, CSV
};
enum memoryMode {
PINNED, PAGEABLE
};
const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device",
"Device to Device", NULL };
const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode,
int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end,
unsigned int increment, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode,
bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode,
bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc);
void printResultsCSV(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
pArgc = &argc;
pArgv = argv;
// set logfile name and start logs
printf("[%s] - Starting...\n", sSDKsample);
int iRetVal = runTest(argc, (const char **) argv);
if (iRetVal < 0) {
checkCudaErrors(hipSetDevice(0));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
}
// finish
printf("%s\n", (iRetVal == 0) ? "Result = PASS" : "Result = FAIL");
exit((iRetVal == 0) ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv) {
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PINNED;
//process command line args
if (checkCmdLineFlag(argc, argv, "help")) {
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, argv, "csv")) {
printmode = CSV;
}
if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) {
if (strcmp(memModeStr, "pageable") == 0) {
memMode = PAGEABLE;
} else if (strcmp(memModeStr, "pinned") == 0) {
memMode = PINNED;
} else {
printf(
"Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return -1000;
}
} else {
//default - pinned memory
memMode = PINNED;
}
if (getCmdLineArgumentString(argc, argv, "device", &device)) {
int deviceCount;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n", (int) error_id,
hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("!!!!!No devices found!!!!!\n");
return -2000;
}
if (strcmp(device, "all") == 0) {
printf(
"\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount - 1;
} else {
startDevice = endDevice = atoi(device);
if (startDevice >= deviceCount || startDevice < 0) {
printf(
"\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n",
startDevice, 0);
startDevice = endDevice = 0;
}
}
}
printf("Running on...\n\n");
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
hipDeviceProp_t deviceProp;
hipError_t error_id = hipGetDeviceProperties(&deviceProp,
currentDevice);
if (error_id == hipSuccess) {
printf(" Device %d: %s\n", currentDevice, deviceProp.name);
if (deviceProp.computeMode == hipComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
checkCudaErrors(hipSetDevice(currentDevice));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
} else {
printf("hipGetDeviceProperties returned %d\n-> %s\n",
(int) error_id, hipGetErrorString(error_id));
checkCudaErrors(hipSetDevice(currentDevice));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) {
//figure out the mode
if (strcmp(modeStr, "quick") == 0) {
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
} else if (strcmp(modeStr, "shmoo") == 0) {
printf(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
} else if (strcmp(modeStr, "range") == 0) {
printf(" Range Mode\n\n");
mode = RANGE_MODE;
} else {
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return -3000;
}
} else {
//default mode - quick
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if (checkCmdLineFlag(argc, argv, "htod")) {
htod = true;
}
if (checkCmdLineFlag(argc, argv, "dtoh")) {
dtoh = true;
}
if (checkCmdLineFlag(argc, argv, "dtod")) {
dtod = true;
}
#if CUDART_VERSION >= 2020
if (checkCmdLineFlag(argc, argv, "wc"))
{
wc = true;
}
#endif
if (checkCmdLineFlag(argc, argv, "cputiming")) {
bDontUseGPUTiming = true;
}
if (!htod && !dtoh && !dtod) {
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if (RANGE_MODE == mode) {
if (checkCmdLineFlag(argc, (const char **) argv, "start")) {
start = getCmdLineArgumentInt(argc, argv, "start");
if (start <= 0) {
printf("Illegal argument - start must be greater than zero\n");
return -4000;
}
} else {
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return -5000;
}
if (checkCmdLineFlag(argc, (const char **) argv, "end")) {
end = getCmdLineArgumentInt(argc, argv, "end");
if (end <= 0) {
printf("Illegal argument - end must be greater than zero\n");
return -6000;
}
if (start > end) {
printf("Illegal argument - start is greater than end\n");
return -7000;
}
} else {
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return -8000;
}
if (checkCmdLineFlag(argc, argv, "increment")) {
increment = getCmdLineArgumentInt(argc, argv, "increment");
if (increment <= 0) {
printf(
"Illegal argument - increment must be greater than zero\n");
return -9000;
}
} else {
printf("Must specify an increment in user mode\n");
printf("See --help for more information\n");
return -10000;
}
}
if (htod) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, HOST_TO_DEVICE, printmode,
memMode, startDevice, endDevice, wc);
}
if (dtoh) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, DEVICE_TO_HOST, printmode,
memMode, startDevice, endDevice, wc);
}
if (dtod) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, DEVICE_TO_DEVICE, printmode,
memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) {
hipSetDevice(nDevice);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode,
int startDevice, int endDevice, bool wc) {
switch (mode) {
case QUICK_MODE:
testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice,
endDevice, wc);
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode,
startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice,
wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode,
startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void testBandwidthRange(unsigned int start, unsigned int end,
unsigned int increment, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = (unsigned int *) malloc(
count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0
for (unsigned int i = 0; i < count; i++) {
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
hipSetDevice(currentDevice);
//run each of the copies
for (unsigned int i = 0; i < count; i++) {
memSizes[i] = start + i * increment;
switch (kind) {
case DEVICE_TO_HOST:
bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode,
wc);
break;
case HOST_TO_DEVICE:
bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode,
wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]);
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if (printmode == CSV) {
printResultsCSV(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
} else {
printResultsReadable(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void testBandwidthShmoo(memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
//count the number of copies to make
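    // shmoo sizes step by 1 KB up to 20 KB, 2 KB up to 50 KB, 10 KB up to 100 KB,
    // 100 KB up to 1 MB, 1 MB up to 16 MB, 2 MB up to 32 MB, then 4 MB up to the maximum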
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB)/ SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB)/ SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB)/ SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB)/ SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB)/ SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB)/ SHMOO_INCREMENT_4MB);
unsigned int *memSizes = (unsigned int *) malloc(
count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0
for (unsigned int i = 0; i < count; i++) {
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
hipSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while (memSize <= SHMOO_MEMSIZE_MAX) {
if (memSize < SHMOO_LIMIT_20KB) {
memSize += SHMOO_INCREMENT_1KB;
} else if (memSize < SHMOO_LIMIT_50KB) {
memSize += SHMOO_INCREMENT_2KB;
} else if (memSize < SHMOO_LIMIT_100KB) {
memSize += SHMOO_INCREMENT_10KB;
} else if (memSize < SHMOO_LIMIT_1MB) {
memSize += SHMOO_INCREMENT_100KB;
} else if (memSize < SHMOO_LIMIT_16MB) {
memSize += SHMOO_INCREMENT_1MB;
} else if (memSize < SHMOO_LIMIT_32MB) {
memSize += SHMOO_INCREMENT_2MB;
} else {
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch (kind) {
case DEVICE_TO_HOST:
bandwidths[iteration] += testDeviceToHostTransfer(
memSizes[iteration], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[iteration] += testHostToDeviceTransfer(
memSizes[iteration], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[iteration] += testDeviceToDeviceTransfer(
memSizes[iteration]);
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
//print results
printf("\n");
if (CSV == printmode) {
printResultsCSV(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
} else {
printResultsReadable(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode,
bool wc) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
if (PINNED == memMode) {
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
#else
checkCudaErrors(hipHostMalloc((void ** )&h_idata, memSize));
checkCudaErrors(hipHostMalloc((void ** )&h_odata, memSize));
#endif
} else {
//pageable memory mode - use malloc
h_idata = (unsigned char *) malloc(memSize);
h_odata = (unsigned char *) malloc(memSize);
if (h_idata == 0 || h_odata == 0) {
fprintf(stderr,
"Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
}
//initialize the memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void ** ) &d_idata, memSize));
//initialize the device memory
checkCudaErrors(
hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice));
//copy data from GPU to Host
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
if (PINNED == memMode) {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
hipMemcpyAsync(h_odata, d_idata, memSize,
hipMemcpyDeviceToHost, 0));
}
} else {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
hipMemcpy(h_odata, d_idata, memSize,
hipMemcpyDeviceToHost));
}
}
checkCudaErrors(hipEventRecord(stop, 0));
// make sure GPU has finished copying
checkCudaErrors(hipDeviceSynchronize());
    //get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in MB/s
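    // total bytes moved = memSize * MEMCOPY_ITERATIONS; the (1 << 10) and (1 << 20)
    // factors convert the elapsed milliseconds and the byte count into (approximate)
    // seconds and megabytes, as in the original sample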
bandwidthInMBs = ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode) {
checkCudaErrors(hipHostFree(h_idata));
checkCudaErrors(hipHostFree(h_odata));
} else {
free(h_idata);
free(h_odata);
}
checkCudaErrors(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode,
bool wc) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
unsigned char *h_odata = NULL;
if (PINNED == memMode) {
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0));
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(hipHostMalloc((void ** )&h_odata, memSize));
#endif
} else {
//pageable memory mode - use malloc
h_odata = (unsigned char *) malloc(memSize);
if (h_odata == 0) {
fprintf(stderr,
"Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
}
unsigned char *h_cacheClear1 = (unsigned char *) malloc(CACHE_CLEAR_SIZE);
unsigned char *h_cacheClear2 = (unsigned char *) malloc(CACHE_CLEAR_SIZE);
    if (h_cacheClear1 == 0 || h_cacheClear2 == 0) {
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_odata[i] = (unsigned char) (i & 0xff);
}
for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char);
i++) {
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void ** ) &d_idata, memSize));
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
//copy host memory to device memory
if (PINNED == memMode) {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
hipMemcpyAsync(d_idata, h_odata, memSize,
hipMemcpyHostToDevice, 0));
}
} else {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
hipMemcpy(d_idata, h_odata, memSize,
hipMemcpyHostToDevice));
}
}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipDeviceSynchronize());
//total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
sdkResetTimer(&timer);
//calculate bandwidth in MB/s
bandwidthInMBs = ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode) {
checkCudaErrors(hipHostFree(h_odata));
} else {
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testDeviceToDeviceTransfer(unsigned int memSize) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//allocate host memory
unsigned char *h_idata = (unsigned char *) malloc(memSize);
if (h_idata == 0) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the host memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(hipMalloc((void ** ) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors(hipMalloc((void ** ) &d_odata, memSize));
//initialize memory
checkCudaErrors(
hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice));
//run the memcopy
sdkStartTimer(&timer);
checkCudaErrors(hipEventRecord(start, 0));
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
hipMemcpy(d_odata, d_idata, memSize,
hipMemcpyDeviceToDevice));
}
checkCudaErrors(hipEventRecord(stop, 0));
//Since device to device memory copies are non-blocking,
//hipDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors(hipDeviceSynchronize());
    //get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop));
if (bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in MB/s
bandwidthInMBs = 2.0f
* ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
sdkDeleteTimer(&timer);
free(h_idata);
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc) {
printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs);
printf(" %s Memory Transfers\n", sMemoryMode[memMode]);
if (wc) {
printf(" Write-Combined Memory Writes are Enabled");
}
printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n");
unsigned int i;
for (i = 0; i < (count - 1); i++) {
printf(" %u\t\t\t%s%.1f\n", memSizes[i],
(memSizes[i] < 10000) ? "\t" : "", bandwidths[i]);
}
printf(" %u\t\t\t%s%.1f\n\n", memSizes[i],
(memSizes[i] < 10000) ? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc) {
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE) {
sConfig += "D2D";
} else {
if (kind == DEVICE_TO_HOST) {
sConfig += "D2H";
} else if (kind == HOST_TO_DEVICE) {
sConfig += "H2D";
}
if (memMode == PAGEABLE) {
sConfig += "-Paged";
} else if (memMode == PINNED) {
sConfig += "-Pinned";
if (wc) {
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
for (i = 0; i < count; i++) {
dSeconds = (double) memSizes[i] / (bandwidths[i] * (double) (1 << 20));
printf(
"bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i],
iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void) {
printf("Usage: bandwidthTest [OPTION]...\n");
printf(
"Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf(
"Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf(
"./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
| 1d2df63a15df92dcd49044b607664c6d31896108.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// CUDA runtime
#include <cuda_runtime.h>
// includes
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
#include <cuda.h>
#include <memory>
#include <iostream>
#include <cassert>
static const char *sSDKsample = "CUDA Bandwidth Test";
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode {
QUICK_MODE, RANGE_MODE, SHMOO_MODE
};
enum memcpyKind {
DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE
};
enum printMode {
USER_READABLE, CSV
};
enum memoryMode {
PINNED, PAGEABLE
};
const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device",
"Device to Device", NULL };
const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode,
int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end,
unsigned int increment, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode,
bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode,
bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc);
void printResultsCSV(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
pArgc = &argc;
pArgv = argv;
// set logfile name and start logs
printf("[%s] - Starting...\n", sSDKsample);
int iRetVal = runTest(argc, (const char **) argv);
if (iRetVal < 0) {
checkCudaErrors(cudaSetDevice(0));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
}
// finish
printf("%s\n", (iRetVal == 0) ? "Result = PASS" : "Result = FAIL");
exit((iRetVal == 0) ? EXIT_SUCCESS : EXIT_FAILURE);
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv) {
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PINNED;
//process command line args
if (checkCmdLineFlag(argc, argv, "help")) {
printHelp();
return 0;
}
if (checkCmdLineFlag(argc, argv, "csv")) {
printmode = CSV;
}
if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) {
if (strcmp(memModeStr, "pageable") == 0) {
memMode = PAGEABLE;
} else if (strcmp(memModeStr, "pinned") == 0) {
memMode = PINNED;
} else {
printf(
"Invalid memory mode - valid modes are pageable or pinned\n");
printf("See --help for more information\n");
return -1000;
}
} else {
//default - pinned memory
memMode = PINNED;
}
if (getCmdLineArgumentString(argc, argv, "device", &device)) {
int deviceCount;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int) error_id,
cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
if (deviceCount == 0) {
printf("!!!!!No devices found!!!!!\n");
return -2000;
}
if (strcmp(device, "all") == 0) {
printf(
"\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount - 1;
} else {
startDevice = endDevice = atoi(device);
if (startDevice >= deviceCount || startDevice < 0) {
printf(
"\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n",
startDevice, 0);
startDevice = endDevice = 0;
}
}
}
printf("Running on...\n\n");
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
cudaDeviceProp deviceProp;
cudaError_t error_id = cudaGetDeviceProperties(&deviceProp,
currentDevice);
if (error_id == cudaSuccess) {
printf(" Device %d: %s\n", currentDevice, deviceProp.name);
if (deviceProp.computeMode == cudaComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
checkCudaErrors(cudaSetDevice(currentDevice));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
} else {
printf("cudaGetDeviceProperties returned %d\n-> %s\n",
(int) error_id, cudaGetErrorString(error_id));
checkCudaErrors(cudaSetDevice(currentDevice));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) {
//figure out the mode
if (strcmp(modeStr, "quick") == 0) {
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
} else if (strcmp(modeStr, "shmoo") == 0) {
printf(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
} else if (strcmp(modeStr, "range") == 0) {
printf(" Range Mode\n\n");
mode = RANGE_MODE;
} else {
printf("Invalid mode - valid modes are quick, range, or shmoo\n");
printf("See --help for more information\n");
return -3000;
}
} else {
//default mode - quick
printf(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if (checkCmdLineFlag(argc, argv, "htod")) {
htod = true;
}
if (checkCmdLineFlag(argc, argv, "dtoh")) {
dtoh = true;
}
if (checkCmdLineFlag(argc, argv, "dtod")) {
dtod = true;
}
#if CUDART_VERSION >= 2020
if (checkCmdLineFlag(argc, argv, "wc"))
{
wc = true;
}
#endif
if (checkCmdLineFlag(argc, argv, "cputiming")) {
bDontUseGPUTiming = true;
}
if (!htod && !dtoh && !dtod) {
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if (RANGE_MODE == mode) {
if (checkCmdLineFlag(argc, (const char **) argv, "start")) {
start = getCmdLineArgumentInt(argc, argv, "start");
if (start <= 0) {
printf("Illegal argument - start must be greater than zero\n");
return -4000;
}
} else {
printf("Must specify a starting size in range mode\n");
printf("See --help for more information\n");
return -5000;
}
if (checkCmdLineFlag(argc, (const char **) argv, "end")) {
end = getCmdLineArgumentInt(argc, argv, "end");
if (end <= 0) {
printf("Illegal argument - end must be greater than zero\n");
return -6000;
}
if (start > end) {
printf("Illegal argument - start is greater than end\n");
return -7000;
}
} else {
printf("Must specify an end size in range mode.\n");
printf("See --help for more information\n");
return -8000;
}
if (checkCmdLineFlag(argc, argv, "increment")) {
increment = getCmdLineArgumentInt(argc, argv, "increment");
if (increment <= 0) {
printf(
"Illegal argument - increment must be greater than zero\n");
return -9000;
}
} else {
printf("Must specify an increment in user mode\n");
printf("See --help for more information\n");
return -10000;
}
}
if (htod) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, HOST_TO_DEVICE, printmode,
memMode, startDevice, endDevice, wc);
}
if (dtoh) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, DEVICE_TO_HOST, printmode,
memMode, startDevice, endDevice, wc);
}
if (dtod) {
testBandwidth((unsigned int) start, (unsigned int) end,
(unsigned int) increment, mode, DEVICE_TO_DEVICE, printmode,
memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) {
cudaSetDevice(nDevice);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode,
int startDevice, int endDevice, bool wc) {
switch (mode) {
case QUICK_MODE:
testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice,
endDevice, wc);
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode,
startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice,
wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode,
startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void testBandwidthRange(unsigned int start, unsigned int end,
unsigned int increment, memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = (unsigned int *) malloc(
count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0
for (unsigned int i = 0; i < count; i++) {
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
cudaSetDevice(currentDevice);
//run each of the copies
for (unsigned int i = 0; i < count; i++) {
memSizes[i] = start + i * increment;
switch (kind) {
case DEVICE_TO_HOST:
bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode,
wc);
break;
case HOST_TO_DEVICE:
bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode,
wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]);
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if (printmode == CSV) {
printResultsCSV(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
} else {
printResultsReadable(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void testBandwidthShmoo(memcpyKind kind, printMode printmode,
memoryMode memMode, int startDevice, int endDevice, bool wc) {
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB)/ SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB)/ SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB)/ SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB)/ SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB)/ SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB)/ SHMOO_INCREMENT_4MB);
unsigned int *memSizes = (unsigned int *) malloc(
count * sizeof(unsigned int));
double *bandwidths = (double *) malloc(count * sizeof(double));
    // Before calculating the cumulative bandwidth, initialize the bandwidths array to 0.0
for (unsigned int i = 0; i < count; i++) {
bandwidths[i] = 0.0;
}
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice;
currentDevice++) {
cudaSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while (memSize <= SHMOO_MEMSIZE_MAX) {
if (memSize < SHMOO_LIMIT_20KB) {
memSize += SHMOO_INCREMENT_1KB;
} else if (memSize < SHMOO_LIMIT_50KB) {
memSize += SHMOO_INCREMENT_2KB;
} else if (memSize < SHMOO_LIMIT_100KB) {
memSize += SHMOO_INCREMENT_10KB;
} else if (memSize < SHMOO_LIMIT_1MB) {
memSize += SHMOO_INCREMENT_100KB;
} else if (memSize < SHMOO_LIMIT_16MB) {
memSize += SHMOO_INCREMENT_1MB;
} else if (memSize < SHMOO_LIMIT_32MB) {
memSize += SHMOO_INCREMENT_2MB;
} else {
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch (kind) {
case DEVICE_TO_HOST:
bandwidths[iteration] += testDeviceToHostTransfer(
memSizes[iteration], memMode, wc);
break;
case HOST_TO_DEVICE:
bandwidths[iteration] += testHostToDeviceTransfer(
memSizes[iteration], memMode, wc);
break;
case DEVICE_TO_DEVICE:
bandwidths[iteration] += testDeviceToDeviceTransfer(
memSizes[iteration]);
break;
}
iteration++;
printf(".");
}
} // Complete the bandwidth computation on all the devices
//print results
printf("\n");
if (CSV == printmode) {
printResultsCSV(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
} else {
printResultsReadable(memSizes, bandwidths, count, kind, memMode,
(1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode,
bool wc) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
if (PINNED == memMode) {
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
checkCudaErrors(cudaHostAlloc((void **)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
#else
checkCudaErrors(cudaMallocHost((void ** )&h_idata, memSize));
checkCudaErrors(cudaMallocHost((void ** )&h_odata, memSize));
#endif
} else {
//pageable memory mode - use malloc
h_idata = (unsigned char *) malloc(memSize);
h_odata = (unsigned char *) malloc(memSize);
if (h_idata == 0 || h_odata == 0) {
fprintf(stderr,
"Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
}
//initialize the memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void ** ) &d_idata, memSize));
//initialize the device memory
checkCudaErrors(
cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice));
//copy data from GPU to Host
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
if (PINNED == memMode) {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
cudaMemcpyAsync(h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost, 0));
}
} else {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
cudaMemcpy(h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost));
}
}
checkCudaErrors(cudaEventRecord(stop, 0));
// make sure GPU has finished copying
checkCudaErrors(cudaDeviceSynchronize());
    //get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in MB/s
bandwidthInMBs = ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode) {
checkCudaErrors(cudaFreeHost(h_idata));
checkCudaErrors(cudaFreeHost(h_odata));
} else {
free(h_idata);
free(h_odata);
}
checkCudaErrors(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode,
bool wc) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
unsigned char *h_odata = NULL;
if (PINNED == memMode) {
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0));
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors(cudaMallocHost((void ** )&h_odata, memSize));
#endif
} else {
//pageable memory mode - use malloc
h_odata = (unsigned char *) malloc(memSize);
if (h_odata == 0) {
fprintf(stderr,
"Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
}
unsigned char *h_cacheClear1 = (unsigned char *) malloc(CACHE_CLEAR_SIZE);
unsigned char *h_cacheClear2 = (unsigned char *) malloc(CACHE_CLEAR_SIZE);
    if (h_cacheClear1 == 0 || h_cacheClear2 == 0) {
fprintf(stderr, "Not enough memory available on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_odata[i] = (unsigned char) (i & 0xff);
}
for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char);
i++) {
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void ** ) &d_idata, memSize));
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
//copy host memory to device memory
if (PINNED == memMode) {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
cudaMemcpyAsync(d_idata, h_odata, memSize,
cudaMemcpyHostToDevice, 0));
}
} else {
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
cudaMemcpy(d_idata, h_odata, memSize,
cudaMemcpyHostToDevice));
}
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaDeviceSynchronize());
//total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (PINNED != memMode || bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
sdkResetTimer(&timer);
//calculate bandwidth in MB/s
bandwidthInMBs = ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
sdkDeleteTimer(&timer);
if (PINNED == memMode) {
checkCudaErrors(cudaFreeHost(h_odata));
} else {
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float testDeviceToDeviceTransfer(unsigned int memSize) {
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer(&timer);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//allocate host memory
unsigned char *h_idata = (unsigned char *) malloc(memSize);
if (h_idata == 0) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n");
exit(EXIT_FAILURE);
}
//initialize the host memory
for (unsigned int i = 0; i < memSize / sizeof(unsigned char); i++) {
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors(cudaMalloc((void ** ) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors(cudaMalloc((void ** ) &d_odata, memSize));
//initialize memory
checkCudaErrors(
cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice));
//run the memcopy
sdkStartTimer(&timer);
checkCudaErrors(cudaEventRecord(start, 0));
for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(
cudaMemcpy(d_odata, d_idata, memSize,
cudaMemcpyDeviceToDevice));
}
checkCudaErrors(cudaEventRecord(stop, 0));
//Since device to device memory copies are non-blocking,
//cudaDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors(cudaDeviceSynchronize());
    //get the total elapsed time in ms
sdkStopTimer(&timer);
checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop));
if (bDontUseGPUTiming) {
elapsedTimeInMs = sdkGetTimerValue(&timer);
}
//calculate bandwidth in MB/s
bandwidthInMBs = 2.0f
* ((float) (1 << 10) * memSize * (float) MEMCOPY_ITERATIONS)
/ (elapsedTimeInMs * (float) (1 << 20));
//clean up memory
sdkDeleteTimer(&timer);
free(h_idata);
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc) {
printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs);
printf(" %s Memory Transfers\n", sMemoryMode[memMode]);
if (wc) {
printf(" Write-Combined Memory Writes are Enabled");
}
printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n");
unsigned int i;
for (i = 0; i < (count - 1); i++) {
printf(" %u\t\t\t%s%.1f\n", memSizes[i],
(memSizes[i] < 10000) ? "\t" : "", bandwidths[i]);
}
printf(" %u\t\t\t%s%.1f\n\n", memSizes[i],
(memSizes[i] < 10000) ? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double *bandwidths,
unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs,
bool wc) {
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE) {
sConfig += "D2D";
} else {
if (kind == DEVICE_TO_HOST) {
sConfig += "D2H";
} else if (kind == HOST_TO_DEVICE) {
sConfig += "H2D";
}
if (memMode == PAGEABLE) {
sConfig += "-Paged";
} else if (memMode == PINNED) {
sConfig += "-Pinned";
if (wc) {
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
for (i = 0; i < count; i++) {
dSeconds = (double) memSizes[i] / (bandwidths[i] * (double) (1 << 20));
printf(
"bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i],
iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void) {
printf("Usage: bandwidthTest [OPTION]...\n");
printf(
"Test the bandwidth for device to host, host to device, and device to device transfers\n");
printf("\n");
printf(
"Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
printf(
"./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
printf("\n");
printf("Options:\n");
printf("--help\tDisplay this help menu\n");
printf("--csv\tPrint results as a CSV\n");
printf("--device=[deviceno]\tSpecify the device device to be used\n");
printf(" all - compute cumulative bandwidth on all the devices\n");
printf(" 0,1,2,...,n - Specify any particular device to be used\n");
printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
printf(" pageable - pageable memory\n");
printf(" pinned - non-pageable system memory\n");
printf("--mode=[MODE]\tSpecify the mode to use\n");
printf(" quick - performs a quick measurement\n");
printf(" range - measures a user-specified range of values\n");
printf(" shmoo - performs an intense shmoo of a large range of values\n");
printf("--htod\tMeasure host to device transfers\n");
printf("--dtoh\tMeasure device to host transfers\n");
printf("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
printf("--wc\tAllocate pinned memory as write-combined\n");
#endif
printf("--cputiming\tForce CPU-based timing always\n");
printf("Range mode options\n");
printf("--start=[SIZE]\tStarting transfer size in bytes\n");
printf("--end=[SIZE]\tEnding transfer size in bytes\n");
printf("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
bc07c849e1e378a929bb2f35adeb0ff0cfec6d28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <assert.h>
#include <vector>
#include "utils.h"
const unsigned warp_size = 32;
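// Naive transpose: one thread per element. Consecutive threads read consecutive
// elements of array_in (coalesced), but the writes to array_out stride by n_rows,
// so they are not coalesced.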
__global__
void simpleTranspose(int* array_in, int* array_out, int n_rows, int n_cols) {
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
int col = tid % n_cols;
int row = tid / n_cols;
if(col < n_cols && row < n_rows) {
array_out[col * n_rows + row] = array_in[row * n_cols + col];
}
}
__global__
void simpleTranspose2D(int* array_in, int* array_out, int n_rows, int n_cols) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
if(col < n_cols && row < n_rows) {
array_out[col * n_rows + row] = array_in[row * n_cols + col];
}
}
template<int num_warps>
__global__
void fastTranspose(int* array_in, int* array_out, int n_rows, int n_cols) {
const int warp_id = threadIdx.y;
const int lane = threadIdx.x;
__shared__ int block[warp_size][warp_size+1];
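    // The +1 column of padding places consecutive rows of the tile on different
    // shared-memory banks, so the column reads below do not cause bank conflicts.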
const int bc = blockIdx.x;
const int br = blockIdx.y;
// Load 32x32 block into shared memory
int gc = bc * warp_size + lane; // Global column index
for(int i = 0; i < warp_size / num_warps; ++i) {
int gr = br * warp_size + i * num_warps + warp_id; // Global row index
block[i * num_warps + warp_id][lane] = array_in[gr * n_cols + gc];
}
__syncthreads();
// Now we switch to each warp outputting a row, which will read
// from a column in the shared memory. This way everything remains
// coalesced.
int gr = br * warp_size + lane;
for(int i = 0; i < warp_size / num_warps; ++i) {
int gc = bc * warp_size + i * num_warps + warp_id;
array_out[gc * n_rows + gr] = block[lane][i * num_warps + warp_id];
}
}
void isTranspose(const std::vector<int>& A,
const std::vector<int>& B,
int n) {
for(int i = 0; i < n; ++i) {
for(int j = 0; j < n; ++j) {
assert(A[n * i + j] == B[n * j + i]);
}
}
}
/* Must be an odd number */
#define MEMCOPY_ITERATIONS 11
int main(void) {
const int n = (1<<12);
printf("Number of MB to transpose: %ld\n\n",sizeof(int) * n * n / 1024 / 1024);
int num_threads, num_blocks;
std::vector<int> h_in(n * n);
std::vector<int> h_out(n * n);
for(int i = 0; i < n * n; ++i) {
h_in[i] = random() % 100;
}
int* d_in, *d_out;
checkCudaErrors(hipMalloc(&d_in, sizeof(int) * n * n));
checkCudaErrors(hipMalloc(&d_out, sizeof(int) * n * n));
GpuTimer timer;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(hipMemcpy(d_out, d_in, sizeof(int) * n * n,
hipMemcpyDeviceToDevice));
}
timer.stop();
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
printf("Bandwidth bench\n");
printf("GPU took %g ms\n",timer.elapsed() / MEMCOPY_ITERATIONS);
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
checkCudaErrors(hipMemcpy(d_in, &h_in[0], sizeof(int) * n * n,
hipMemcpyHostToDevice));
num_threads = 256;
num_blocks = (n * n + num_threads - 1) / num_threads;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
hipLaunchKernelGGL(( simpleTranspose), dim3(num_blocks), dim3(num_threads), 0, 0, d_in, d_out, n, n);
}
timer.stop();
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
hipMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nsimpleTranspose\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
dim3 block_dim(8, 32);
dim3 grid_dim(n / 8, n / 32);
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
hipLaunchKernelGGL(( simpleTranspose2D), dim3(grid_dim), dim3(block_dim), 0, 0, d_in, d_out, n, n);
}
timer.stop();
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
hipMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nsimpleTranspose2D\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
const int num_warps_per_block = 256/32;
assert(warp_size % num_warps_per_block == 0);
block_dim.x = warp_size;
block_dim.y = num_warps_per_block;
grid_dim.x = n / warp_size;
grid_dim.y = n / warp_size;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
hipLaunchKernelGGL(( fastTranspose<num_warps_per_block>), dim3(grid_dim), dim3(block_dim), 0, 0, d_in, d_out, n,
n);
}
timer.stop();
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
hipMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nfastTranspose\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
checkCudaErrors(hipFree(d_in));
checkCudaErrors(hipFree(d_out));
return 0;
}
| bc07c849e1e378a929bb2f35adeb0ff0cfec6d28.cu | #include <cstdio>
#include <cstdlib>
#include <assert.h>
#include <vector>
#include "utils.h"
const unsigned warp_size = 32;
__global__
void simpleTranspose(int* array_in, int* array_out, int n_rows, int n_cols) {
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
int col = tid % n_cols;
int row = tid / n_cols;
if(col < n_cols && row < n_rows) {
array_out[col * n_rows + row] = array_in[row * n_cols + col];
}
}
__global__
void simpleTranspose2D(int* array_in, int* array_out, int n_rows, int n_cols) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
if(col < n_cols && row < n_rows) {
array_out[col * n_rows + row] = array_in[row * n_cols + col];
}
}
template<int num_warps>
__global__
void fastTranspose(int* array_in, int* array_out, int n_rows, int n_cols) {
const int warp_id = threadIdx.y;
const int lane = threadIdx.x;
__shared__ int block[warp_size][warp_size+1];
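    // +1 column of padding avoids shared-memory bank conflicts on the column reads below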
const int bc = blockIdx.x;
const int br = blockIdx.y;
// Load 32x32 block into shared memory
int gc = bc * warp_size + lane; // Global column index
for(int i = 0; i < warp_size / num_warps; ++i) {
int gr = br * warp_size + i * num_warps + warp_id; // Global row index
block[i * num_warps + warp_id][lane] = array_in[gr * n_cols + gc];
}
__syncthreads();
// Now we switch to each warp outputting a row, which will read
// from a column in the shared memory. This way everything remains
// coalesced.
int gr = br * warp_size + lane;
for(int i = 0; i < warp_size / num_warps; ++i) {
int gc = bc * warp_size + i * num_warps + warp_id;
array_out[gc * n_rows + gr] = block[lane][i * num_warps + warp_id];
}
}
void isTranspose(const std::vector<int>& A,
const std::vector<int>& B,
int n) {
for(int i = 0; i < n; ++i) {
for(int j = 0; j < n; ++j) {
assert(A[n * i + j] == B[n * j + i]);
}
}
}
/* Must be an odd number */
#define MEMCOPY_ITERATIONS 11
int main(void) {
const int n = (1<<12);
printf("Number of MB to transpose: %ld\n\n",sizeof(int) * n * n / 1024 / 1024);
int num_threads, num_blocks;
std::vector<int> h_in(n * n);
std::vector<int> h_out(n * n);
for(int i = 0; i < n * n; ++i) {
h_in[i] = random() % 100;
}
int* d_in, *d_out;
checkCudaErrors(cudaMalloc(&d_in, sizeof(int) * n * n));
checkCudaErrors(cudaMalloc(&d_out, sizeof(int) * n * n));
GpuTimer timer;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
checkCudaErrors(cudaMemcpy(d_out, d_in, sizeof(int) * n * n,
cudaMemcpyDeviceToDevice));
}
timer.stop();
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
printf("Bandwidth bench\n");
printf("GPU took %g ms\n",timer.elapsed() / MEMCOPY_ITERATIONS);
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
checkCudaErrors(cudaMemcpy(d_in, &h_in[0], sizeof(int) * n * n,
cudaMemcpyHostToDevice));
num_threads = 256;
num_blocks = (n * n + num_threads - 1) / num_threads;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
simpleTranspose<<<num_blocks, num_threads>>>(d_in, d_out, n, n);
}
timer.stop();
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
cudaMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nsimpleTranspose\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
dim3 block_dim(8, 32);
dim3 grid_dim(n / 8, n / 32);
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
simpleTranspose2D<<<grid_dim, block_dim>>>(d_in, d_out, n, n);
}
timer.stop();
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
cudaMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nsimpleTranspose2D\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
const int num_warps_per_block = 256/32;
assert(warp_size % num_warps_per_block == 0);
block_dim.x = warp_size;
block_dim.y = num_warps_per_block;
grid_dim.x = n / warp_size;
grid_dim.y = n / warp_size;
timer.start();
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) {
fastTranspose<num_warps_per_block><<<grid_dim, block_dim>>>(d_in, d_out, n,
n);
}
timer.stop();
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&h_out[0], d_out, sizeof(int) * n * n,
cudaMemcpyDeviceToHost));
isTranspose(h_in, h_out, n);
printf("\nfastTranspose\n");
printf("GPU took %g ms\n",timer.elapsed());
printf("Effective bandwidth is %g GB/s\n",
(2*sizeof(int)*n*n*MEMCOPY_ITERATIONS)/(1e9*1e-3*timer.elapsed()));
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));
return 0;
}
|
28991f5bccb737e3b2d4c3df583911ddd1b9a4bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/ziterilu_kernels.cu normal z -> d, Tue Feb 9 16:05:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
__global__ void
magma_diterilu_csr_kernel(
magma_int_t num_rows,
magma_int_t nnz,
magma_index_t *rowidxA,
magma_index_t *colidxA,
const double * __restrict__ A,
magma_index_t *rowptrL,
magma_index_t *colidxL,
double *valL,
magma_index_t *rowptrU,
magma_index_t *rowidxU,
double *valU )
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double s, sp;
int il, iu, jl, ju;
if (k < nnz)
{
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A+k );
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
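// Two-pointer merge over the sparsity patterns of row i of L and column j of U
// (the U-part stores flipped col/rowidx): accumulates the sparse dot product so
// that s ends up as A(i,j) - sum_k L(i,k)*U(k,j) over the matching indices.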
while (il < rowptrL[i+1] && iu < rowptrU[j+1])
{
sp = zero;
jl = colidxL[il];
ju = rowidxU[iu];
// avoid branching
sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
if ( i > j ) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else { // modify u entry
valU[iu-1] = s;
}
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
The idea is according to Edmond Chow's presentation at SIAM 2014.
This routine was used in the ISC 2015 paper:
E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
for Computing Incomplete Factorizations on GPUs'
The input format of the matrix is Magma_CSRCOO for the upper and lower
triangular parts. Note however, that we flip col and rowidx for the
U-part.
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_d_matrix
input matrix A determining initial guess & processing order
@param[in,out]
L magma_d_matrix
input/output matrix L containing the ILU approximation
@param[in,out]
U magma_d_matrix
input/output matrix U containing the ILU approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_diterilu_csr(
magma_d_matrix A,
magma_d_matrix L,
magma_d_matrix U,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
// Runtime API
// hipFuncCachePreferShared: shared memory is 48 KB
// hipFuncCachePreferEqual: shared memory is 32 KB
// hipFuncCachePreferL1: shared memory is 16 KB
// hipFuncCachePreferNone: no preference
//hipFuncSetCacheConfig(hipFuncCachePreferShared);
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_diterilu_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val );
return MAGMA_SUCCESS;
}
| 28991f5bccb737e3b2d4c3df583911ddd1b9a4bf.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/ziterilu_kernels.cu normal z -> d, Tue Feb 9 16:05:44 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
__global__ void
magma_diterilu_csr_kernel(
magma_int_t num_rows,
magma_int_t nnz,
magma_index_t *rowidxA,
magma_index_t *colidxA,
const double * __restrict__ A,
magma_index_t *rowptrL,
magma_index_t *colidxL,
double *valL,
magma_index_t *rowptrU,
magma_index_t *rowidxU,
double *valU )
{
int i, j;
int k = blockDim.x * blockIdx.x + threadIdx.x;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double s, sp;
int il, iu, jl, ju;
if (k < nnz)
{
i = rowidxA[k];
j = colidxA[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A+k );
#else
s = A[k];
#endif
il = rowptrL[i];
iu = rowptrU[j];
while (il < rowptrL[i+1] && iu < rowptrU[j+1])
{
sp = zero;
jl = colidxL[il];
ju = rowidxU[iu];
// avoid branching
sp = ( jl == ju ) ? valL[il] * valU[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
if ( i > j ) // modify l entry
valL[il-1] = s / valU[rowptrU[j+1]-1];
else { // modify u entry
valU[iu-1] = s;
}
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete LU factorization.
The idea is according to Edmond Chow's presentation at SIAM 2014.
This routine was used in the ISC 2015 paper:
E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
for Computing Incomplete Factorizations on GPUs'
The input format of the matrix is Magma_CSRCOO for the upper and lower
triangular parts. Note however, that we flip col and rowidx for the
U-part.
Every component of L and U is handled by one thread.
Arguments
---------
@param[in]
A magma_d_matrix
input matrix A determining initial guess & processing order
@param[in,out]
L magma_d_matrix
input/output matrix L containing the ILU approximation
@param[in,out]
U magma_d_matrix
input/output matrix U containing the ILU approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_diterilu_csr(
magma_d_matrix A,
magma_d_matrix L,
magma_d_matrix U,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
// Runtime API
// cudaFuncCachePreferShared: shared memory is 48 KB
// cudaFuncCachePreferEqual: shared memory is 32 KB
// cudaFuncCachePreferL1: shared memory is 16 KB
// cudaFuncCachePreferNone: no preference
//cudaFuncSetCacheConfig(cudaFuncCachePreferShared);
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_diterilu_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
L.row, L.col, L.val,
U.row, U.col, U.val );
return MAGMA_SUCCESS;
}
|
d2f423cfb565a38ea53c900e0037f33c00689a74.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
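// NOTE: REPLACE_ITERATIONS is presumably substituted with a concrete iteration
// count by the benchmark-generation script before this file is compiled.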
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int); // declaration matches the float* definition below
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x; // global thread index
int ctaid = blockIdx.x*blockDim.x; // index of the first thread in this block
int offset=THREADS_PER_BLOCK/2;
float sum = 0.0;
float mult = 2.5;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
if(tid%2==0){
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
}
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
B[tid] = sum;
A[tid*2]= sum;
if(tid%2==1 && tid<ctaid+offset/2){
sum*=mult;
sum*=mult;
sum*=mult;
}
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
// allocate and bind the texture buffers in bytes; N alone would under-allocate for the N*sizeof(float) copies below
hipMalloc((void**) &device_texture1, N*sizeof(float));
hipMalloc((void**) &device_texture2, N*sizeof(float));
hipMemcpy(device_texture1, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, N*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, N*sizeof(float));
hipBindTexture(0, texmem2, device_texture2, N*sizeof(float));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries in [0, 1].
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0)); // seed once; reseeding inside the loop would produce identical values
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast avoids integer division, which would round every entry to 0
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
| d2f423cfb565a38ea53c900e0037f33c00689a74.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
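// NOTE: REPLACE_ITERATIONS is presumably substituted with a concrete iteration
// count by the benchmark-generation script before this file is compiled.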
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int); // declaration matches the float* definition below
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x; // global thread index
int ctaid = blockIdx.x*blockDim.x; // index of the first thread in this block
int offset=THREADS_PER_BLOCK/2;
float sum = 0.0;
float mult = 2.5;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
if(tid%2==0){
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
}
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
sum+=tex1Dfetch(texmem1,tid);
B[tid] = sum;
A[tid*2]= sum;
if(tid%2==1 && tid<ctaid+offset/2){
sum*=mult;
sum*=mult;
sum*=mult;
}
A[tid*2] = tex1Dfetch(texmem2,tid)*B[tid]+sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(N*sizeof(float));
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
// allocate and bind the texture buffers in bytes; N alone would under-allocate for the N*sizeof(float) copies below
cudaMalloc((void**) &device_texture1, N*sizeof(float));
cudaMalloc((void**) &device_texture2, N*sizeof(float));
cudaMemcpy(device_texture1, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, N*sizeof(float));
cudaBindTexture(0, texmem2, device_texture2, N*sizeof(float));
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries in [0, 1].
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0)); // seed once; reseeding inside the loop would produce identical values
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast avoids integer division, which would round every entry to 0
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
6eb13059557b04201baf7ac5d8679439f4602564.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_common.h"
#include <stdio.h>
namespace allovolume {
void* gpuAllocate(size_t size) {
void* result = 0;
hipError_t err = hipMalloc(&result, size);
if(!result) {
fprintf(stderr, "cudaAllocate: hipMalloc() of %lu (%.2f MB): %s\n",
size, size / 1048576.0,
hipGetErrorString(err));
size_t memory_free, memory_total;
hipMemGetInfo(&memory_free, &memory_total);
fprintf(stderr, " Free: %.2f MB, Total: %.2f MB\n", (float)memory_free / 1048576.0, (float)memory_total / 1048576.0);
throw bad_alloc();
}
return result;
}
void gpuDeallocate(void* pointer) {
hipFree(pointer);
}
void gpuUpload(void* dest, const void* src, size_t size) {
hipError_t err = hipMemcpy(dest, src, size, hipMemcpyHostToDevice);
if(err != hipSuccess) {
fprintf(stderr, "cudaUpload: hipMemcpy(): %s\n", hipGetErrorString(err));
throw runtime_error();
}
}
void gpuDownload(void* dest, const void* src, size_t size) {
hipError_t err = hipMemcpy(dest, src, size, hipMemcpyDeviceToHost);
if(err != hipSuccess) {
fprintf(stderr, "cudaUpload: hipMemcpy(): %s\n", hipGetErrorString(err));
throw runtime_error();
}
}
}
| 6eb13059557b04201baf7ac5d8679439f4602564.cu | #include "cuda_common.h"
#include <stdio.h>
namespace allovolume {
void* gpuAllocate(size_t size) {
void* result = 0;
cudaError_t err = cudaMalloc(&result, size);
if(!result) {
fprintf(stderr, "cudaAllocate: cudaMalloc() of %lu (%.2f MB): %s\n",
size, size / 1048576.0,
cudaGetErrorString(err));
size_t memory_free, memory_total;
cudaMemGetInfo(&memory_free, &memory_total);
fprintf(stderr, " Free: %.2f MB, Total: %.2f MB\n", (float)memory_free / 1048576.0, (float)memory_total / 1048576.0);
throw bad_alloc();
}
return result;
}
void gpuDeallocate(void* pointer) {
cudaFree(pointer);
}
void gpuUpload(void* dest, const void* src, size_t size) {
cudaError_t err = cudaMemcpy(dest, src, size, cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
fprintf(stderr, "cudaUpload: cudaMemcpy(): %s\n", cudaGetErrorString(err));
throw runtime_error();
}
}
void gpuDownload(void* dest, const void* src, size_t size) {
cudaError_t err = cudaMemcpy(dest, src, size, cudaMemcpyDeviceToHost);
if(err != cudaSuccess) {
fprintf(stderr, "cudaUpload: cudaMemcpy(): %s\n", cudaGetErrorString(err));
throw runtime_error();
}
}
}
|
b87339a66edbcc11472cc364027bf203fa8f3d49.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "simple_radar_pipeline.h"
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
index_t numChannels = 16;
index_t numPulses = 128;
index_t numSamples = 9000;
index_t waveformLength = 1000;
// cuda stream to place work in
hipStream_t stream;
hipStreamCreate(&stream);
// create some events for timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
auto radar = RadarPipeline(numPulses, numSamples, waveformLength, numChannels, stream);
rand(radar.GetTPCView(), NORMAL, 0, stream);
radar.DopplerProcessing();
printf("Doppler output:\n");
radar.GetTPCView().Slice<1>({0, 0, 0}, {matxSliceDim, matxSliceDim, 16}).rint();
hipStreamDestroy(stream);
return 0;
}
| b87339a66edbcc11472cc364027bf203fa8f3d49.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "simple_radar_pipeline.h"
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
index_t numChannels = 16;
index_t numPulses = 128;
index_t numSamples = 9000;
index_t waveformLength = 1000;
// cuda stream to place work in
cudaStream_t stream;
cudaStreamCreate(&stream);
// create some events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
auto radar = RadarPipeline(numPulses, numSamples, waveformLength, numChannels, stream);
rand(radar.GetTPCView(), NORMAL, 0, stream);
radar.DopplerProcessing();
printf("Doppler output:\n");
radar.GetTPCView().Slice<1>({0, 0, 0}, {matxSliceDim, matxSliceDim, 16}).rint();
cudaStreamDestroy(stream);
return 0;
}
|
c128ed338c61dcc4e9b0c344dc4f606c9d63fdbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This file is part of bp-layers.
//
// Copyright (C) 2020 Patrick Knöbelreiter <knoebelreiter at icg dot tugraz dot at>
// Christian Sormann <christian dot sormann at icg dot tugraz dot at>
// Institute for Computer Graphics and Vision, Graz University of Technology
// https://www.tugraz.at/institute/icg/teams/team-pock/
//
// bp-layers is free software: you can redistribute it and/or modify it under the
// terms of the GNU Affero General Public License as published by the Free Software
// Foundation, either version 3 of the License, or any later version.
//
// bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "../../include/error_util.h"
#include "lbp_min_sum_kernel.cuh"
#include "util.cuh"
// ============================================================================
// CUDA KERNELS
// ============================================================================
__global__ void lbp_cuda_forward_kernel_reduction_min_sum(
KernelData cost,
KernelData jump,
KernelData edges,
KernelData5 messages,
KernelData5 messages_argmin,
KernelData message_scale,
const unsigned short x_in,
const unsigned short direction,
int shared_mem_offset, unsigned short delta)
{
unsigned short y = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y;
unsigned short x = 0;
if(direction == UP || direction == DOWN)
{
x = y;
y = x_in;
}
else
{
x = x_in;
// y = y;
}
// shared memory h
extern __shared__ float sdata[];
// message size is N x 4 x H x W x C
// cost size is N x H x W x C
// edges: N x 1 x H x W
// jumps: 1 x 1 x H x W
const short N = cost.size0;
const short H = cost.size1;
const short W = cost.size2;
const short C = cost.size3;
const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x;
const float max_float = 1e15;
// check inside image
if(c >= C || x >= W || y >= H)
{
// write large number that will never win
sdata[tid] = max_float;
return;
}
unsigned int n = 0;
float L2 = jump(0, direction, 0, jump.size3 - 1);
unsigned short start = max(c - delta + 1, 0);
unsigned short stop = min(c + delta - 1, C - 1);
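// [start, stop] is the band of labels within the truncation window delta around c;
// assuming the pairwise cost saturates to jump(0,direction,0,size3-1) outside this
// band, the minimum over all labels can be formed from one parallel reduction plus
// an exact refinement over this band only (see the speed-up branch below).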
float edgeWeight = edges(n, direction, y, x);
// write to shared memory
// compute message for every label
sdata[tid] = cost(n, y, x, c);
// add costs from all neighbors
if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); }
if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); }
if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); }
if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); }
float h = sdata[tid];
__syncthreads();
// save h in shared mem
sdata[tid] = h;
sdata[tid + shared_mem_offset] = static_cast<float>(c);
__syncthreads();
// if delta is larger or equal than this threshold use old version as it is a little faster
int old_version_threshold = C;
float msg = 0.0;
int msg_argmin = 0;
// if there is no truncation use old version
if(delta >= old_version_threshold)
{
//OLD VERSION /////////////////////
sdata[tid] = h;
__syncthreads();
msg = max_float; //minVal + jump(0, direction, 0, jump.size3 - 1) * edgeWeight;
msg_argmin = 0;
for(unsigned short label = 0; label < C; ++label)
{
// compute min in local var to avoid global mem writes
float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(0,direction,label,c) * edgeWeight;
msg = fminf(msg, new_msg);
if(msg == new_msg)
{
msg_argmin = label;
}
}
__syncthreads();
/////////////////
}
else
{
//TRUNC SPEED UP VERSION ///////////////////////////////////
for(unsigned int s=blockDim.y / 2; s > 0; s>>=1)
{
if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C)
{
//min parallel reduction
float min_val = sdata[tid];
float min_label = sdata[tid + shared_mem_offset];
if(sdata[tid + s] <= sdata[tid])
{
min_val = sdata[tid + s];
min_label = sdata[shared_mem_offset + tid + s];
}
//min val parallel reduction
sdata[tid] = min_val;
//argmin parallel reduction
sdata[shared_mem_offset + tid] = min_label;
}
__syncthreads();
}
float min_h = sdata[threadIdx.x * blockDim.y];
int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y];
__syncthreads();
msg = min_h + jump(0, direction, 0, jump.size3 - 1) * edgeWeight;
msg_argmin = static_cast<int>(argmin_h);
sdata[tid] = h;
__syncthreads();
for(unsigned short label = start; label < stop + 1; ++label)
{
// compute min in local var to avoid global mem writes
float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(0,direction,label, c) * edgeWeight;
if(new_msg <= msg)
{
msg = new_msg;
msg_argmin = label;
}
}
__syncthreads();
/////////////////////////////
}
// if(x == 2 && y == 0 && direction == DOWN)
// {
// printf("argmin : %i argmin h %i min_val %f min_h %f h: %f \n", msg_argmin, argmin_h, msg, min_h, h);
// }
// compute normalization with 2nd reduction
sdata[tid] = (float)exp((double)msg);
__syncthreads();
for(unsigned int s=blockDim.y / 2; s > 0; s>>=1)
{
if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// normalize message
double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45);
float logSumExp = (float)log(sum_exp);
// if(sum_exp < 1e-10)
// {
// printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg);
// }
//float logSumExp = 0.0;
if(direction == RIGHT)
{
messages(n, LEFT, y, x+1, c) = msg - logSumExp;
messages_argmin(n, LEFT, y, x+1, c) = msg_argmin;
message_scale(n, LEFT, y, x+1) = sum_exp;
}
if(direction == LEFT)
{
messages(n, RIGHT, y, x-1, c) = msg - logSumExp;
messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin;
message_scale(n, RIGHT, y, x-1) = sum_exp;
}
if(direction == UP)
{
messages(n, DOWN, y-1, x, c) = msg - logSumExp;
messages_argmin(n, DOWN, y-1, x, c) = msg_argmin;
message_scale(n, DOWN, y-1, x) = sum_exp;
}
if(direction == DOWN)
{
messages(n, UP, y+1, x, c) = msg - logSumExp;
messages_argmin(n, UP, y+1, x, c) = msg_argmin;
message_scale(n, UP, y+1, x) = sum_exp;
}
}
__global__ void lbp_cuda_backward_kernel_reduction_min_sum(
KernelData cost,
KernelData jump,
KernelData edges,
KernelData5 messages,
KernelData5 messages_argmin,
KernelData message_scale,
KernelData5 in_grad,
KernelData gradient_unary,
KernelData gradient_pairwise,
KernelData gradient_edge,
KernelData gradient_accumulation,
KernelData gradient_accumulation_tmp,
KernelData5 saved_prev_grad_msg,
const unsigned short x_in,
const unsigned short direction,
bool compute_cross,
const unsigned int n)
{
//initialize utility variables
unsigned short y = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y;
//unsigned int n = 0;
unsigned int x;
if(direction == UP || direction == DOWN)
{
x = y;
y = x_in;
}
else
{
x = x_in;
}
// shared memory h
extern __shared__ float sdata[];
// message size is N x 4 x H x W x C
// cost size is N x H x W x C
// edges: N x 1 x H x W
// jumps: 1 x 1 x H x W
const short N = cost.size0;
const short H = cost.size1;
const short W = cost.size2;
const short C = cost.size3;
const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x;
const float max_float = 1e15;
// check inside image
if(c >= C || x >= W || y >= H)
{
// write large number that will never win
sdata[tid] = max_float;
return;
}
//calc backward message
short prev_row_shift = 0;
short prev_col_shift = 0;
if(direction == LEFT)
{
prev_row_shift = 0;
prev_col_shift = 1;
}
if(direction == RIGHT)
{
prev_row_shift = 0;
prev_col_shift = -1;
}
if(direction == DOWN)
{
prev_row_shift = -1;
prev_col_shift = 0;
}
if(direction == UP)
{
prev_row_shift = 1;
prev_col_shift = 0;
}
int grad_xy_idx = 0;
if(direction == UP)
{
grad_xy_idx = DOWN;
}
if(direction == DOWN)
{
grad_xy_idx = UP;
}
if(direction == LEFT)
{
grad_xy_idx = RIGHT;
}
if(direction == RIGHT)
{
grad_xy_idx = LEFT;
}
float edgeWeight = edges(n, grad_xy_idx, y, x);
int HOR_IDX = 0;
int UP_IDX = 1;
int DOWN_IDX = 2;
///////////////////////in_grad normalization ////////////////////////////////////////////
float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift));
float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift);
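// Backprop through the forward log-sum-exp normalization (msg_c - logSumExp):
// the Jacobian entries are delta_{c,label} - exp(msg_c)/sum_exp, which is what
// J_norm_factor evaluates in the two loops below before weighting the incoming
// and accumulated gradients.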
sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c);
__syncthreads();
float in_grad_normalized = 0.0;
// normalization
for(unsigned short label = 0; label < C; ++label)
{
float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val);
if(c == label)
{
J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val);
}
//printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val);
//in_grad is in sdata
in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor;
}
__syncthreads();
///////////////////////acc normalization ////////////////////////////////////////////
sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX);
__syncthreads();
float acc_normalized = 0.0;
// normalization
for(unsigned short label = 0; label < C; ++label)
{
float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val);
if(c == label)
{
J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val);
}
//in_grad is in sdata
acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor;
}
__syncthreads();
/////////////////////////////////
int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c);
float additive_hor = in_grad_normalized + acc_normalized;
float additive_up = 0.0;
float additive_down = 0.0;
if(compute_cross)
{
additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX);
additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX);
}
// so that gradient_acc is not changed before assigning
__syncthreads();
//unary gradient
atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor);
atomicAdd(&gradient_unary(n, y, x, min_index), additive_up);
atomicAdd(&gradient_unary(n, y, x, min_index), additive_down);
//pairwise gradient
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_hor);
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_up);
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_down);
//edge gradient
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_hor);
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_up);
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_down);
updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX);
updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX);
updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX);
__syncthreads();
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX);
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX);
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX);
__syncthreads();
saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX);
}
// ============================================================================
// CPP KERNEL CALLS
// ============================================================================
namespace cuda
{
std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta)
{
int N = cost.size(0);
int H = cost.size(1);
int W = cost.size(2);
int C = cost.size(3);
//int max_iter = 2;
auto options = at::TensorOptions(cost.options());
// at::Tensor messages = at::zeros({N, 4, H, W, C}, options);
at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options);
at::Tensor message_scale = at::zeros({N, 4, H, W}, options);
//cost = cost.permute({0, 2, 3, 1}).contiguous();
// parallelize over image rows and disparities
// block-size in disparity dimension must be >= number of disparities
// then all the synchronization can be done over blocks (fast)
// otherwise global synchronization is necessary
int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f));
int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f));
// attention: 1024 is maximal number of threads per block!!
const dim3 blockSize(blockDimHW, blockDimC);
const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)),
::ceil(C / static_cast<float>(blockSize.y)));
const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)),
::ceil(C / static_cast<float>(blockSize.y)));
if(numBlocksLR.y != 1)
{
std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1=:" << numBlocksLR.y << "C=" << C << std::endl;
}
const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z;
// to Right
for(unsigned short x = 0; x < W - 1; ++x)
{
// compute min messages
hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta);
cudaSafeCall(hipGetLastError());
}
// to LEFT
for(unsigned short x = W - 1; x > 0; --x)
{
// compute min messages
hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta);
cudaSafeCall(hipGetLastError());
}
// to DOWN
for(unsigned short y = 0; y < H - 1; ++y)
{
// compute min messages
hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta);
cudaSafeCall(hipGetLastError());
}
// to UP
for(unsigned short y = H - 1; y > 0; --y)
{
// compute min messages
hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta);
cudaSafeCall(hipGetLastError());
}
//auto beliefs = messages.sum({1}) + cost;
std::vector<at::Tensor> output_vec;
output_vec.push_back(messages);
output_vec.push_back(messages_argmin);
output_vec.push_back(message_scale);
return output_vec;
}
std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost,
at::Tensor jump,
at::Tensor edge,
at::Tensor messages, unsigned short delta)
{
return lbp_reduction_min_sum(cost, jump, edge, messages, delta);
}
//=============================================================================
// BACKWARD
//=============================================================================
std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost,
at::Tensor jump,
at::Tensor edge,
at::Tensor in_grad,
at::Tensor messages,
at::Tensor messages_argmin,
at::Tensor message_scale)
{
int N = cost.size(0);
int H = cost.size(1);
int W = cost.size(2);
int C = cost.size(3);
auto options = at::TensorOptions(cost.options());
at::Tensor gradient_unary = at::zeros({N, H, W, C}, options);
at::Tensor gradient_pairwise = at::zeros({N, 4, C, C}, options);
at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options);
at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options);
gradient_messages += in_grad;
at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options);
at::Tensor gradient_accumulation;
// parallelize over image rows and disparities
// block-size in disparity dimension must be >= number of disparities
// then all the synchronization can be done over blocks (fast)
// otherwise global synchronization is necessary
int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f));
int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f));
// attention: 1024 is maximal number of threads per block!!
const dim3 blockSize(blockDimHW, blockDimC);
const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)),
::ceil(C / static_cast<float>(blockSize.y)));
const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)),
::ceil(C / static_cast<float>(blockSize.y)));
//printf("blockDimC %i \n", blockDimC);
//printf("blockDimHW %i \n", blockDimHW);
if(numBlocksLR.y != 1)
std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1: " << numBlocksLR.y << std::endl;
const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z;
const float max_float = 1e15;
for(int n = 0; n < N; ++n)
{
////////////////////UNARY GRADIENT////////////////////////////
//to DOWN
gradient_accumulation = at::zeros({N, W, 3, C}, options);
for(short y = 1; y < H; ++y)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options);
hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n);
cudaSafeCall(hipGetLastError());
}
// to UP
gradient_accumulation = at::zeros({N, W, 3, C}, options);
for(short y = H - 2; y >= 0; --y)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options);
hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n);
cudaSafeCall(hipGetLastError());
}
// to LEFT
gradient_accumulation = at::zeros({N, H, 3, C}, options);
for(short x = W-2; x >= 0; --x)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); // same {N, H, 3, C} shape as gradient_accumulation for the horizontal sweeps
hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n);
cudaSafeCall(hipGetLastError());
}
// to RIGHT
gradient_accumulation = at::zeros({N, H, 3, C}, options);
for(short x = 1; x < W; ++x)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); // same {N, H, 3, C} shape as gradient_accumulation for the horizontal sweeps
hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n);
cudaSafeCall(hipGetLastError());
}
}
std::vector<at::Tensor> output_vec;
output_vec.push_back(gradient_unary);
output_vec.push_back(gradient_pairwise);
output_vec.push_back(gradient_edge);
output_vec.push_back(gradient_messages);
return output_vec;
}
} | c128ed338c61dcc4e9b0c344dc4f606c9d63fdbf.cu | // This file is part of bp-layers.
//
// Copyright (C) 2020 Patrick Knöbelreiter <knoebelreiter at icg dot tugraz dot at>
// Christian Sormann <christian dot sormann at icg dot tugraz dot at>
// Institute for Computer Graphics and Vision, Graz University of Technology
// https://www.tugraz.at/institute/icg/teams/team-pock/
//
// bp-layers is free software: you can redistribute it and/or modify it under the
// terms of the GNU Affero General Public License as published by the Free Software
// Foundation, either version 3 of the License, or any later version.
//
// bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "../../include/error_util.h"
#include "lbp_min_sum_kernel.cuh"
#include "util.cuh"
// ============================================================================
// CUDA KERNELS
// ============================================================================
__global__ void lbp_cuda_forward_kernel_reduction_min_sum(
KernelData cost,
KernelData jump,
KernelData edges,
KernelData5 messages,
KernelData5 messages_argmin,
KernelData message_scale,
const unsigned short x_in,
const unsigned short direction,
int shared_mem_offset, unsigned short delta)
{
unsigned short y = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y;
unsigned short x = 0;
if(direction == UP || direction == DOWN)
{
x = y;
y = x_in;
}
else
{
x = x_in;
// y = y;
}
// shared memory h
extern __shared__ float sdata[];
// message size is N x 4 x H x W x C
// cost size is N x H x W x C
// edges: N x 1 x H x W
// jumps: 1 x 1 x H x W
const short N = cost.size0;
const short H = cost.size1;
const short W = cost.size2;
const short C = cost.size3;
const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x;
const float max_float = 1e15;
// check inside image
if(c >= C || x >= W || y >= H)
{
// write large number that will never win
sdata[tid] = max_float;
return;
}
unsigned int n = 0;
float L2 = jump(0, direction, 0, jump.size3 - 1);
unsigned short start = max(c - delta + 1, 0);
unsigned short stop = min(c + delta - 1, C - 1);
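// [start, stop] is the band of labels within the truncation window delta around c;
// assuming the pairwise cost saturates to jump(0,direction,0,size3-1) outside this
// band, the minimum over all labels can be formed from one parallel reduction plus
// an exact refinement over this band only (see the speed-up branch below).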
float edgeWeight = edges(n, direction, y, x);
// write to shared memory
// compute message for every label
sdata[tid] = cost(n, y, x, c);
// add costs from all neighbors
if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); }
if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); }
if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); }
if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); }
float h = sdata[tid];
__syncthreads();
// save h in shared mem
sdata[tid] = h;
sdata[tid + shared_mem_offset] = static_cast<float>(c);
__syncthreads();
// if delta is larger or equal than this threshold use old version as it is a little faster
int old_version_threshold = C;
float msg = 0.0;
int msg_argmin = 0;
// if there is no truncation use old version
if(delta >= old_version_threshold)
{
//OLD VERSION /////////////////////
sdata[tid] = h;
__syncthreads();
msg = max_float; //minVal + jump(0, direction, 0, jump.size3 - 1) * edgeWeight;
msg_argmin = 0;
for(unsigned short label = 0; label < C; ++label)
{
// compute min in local var to avoid global mem writes
float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(0,direction,label,c) * edgeWeight;
msg = fminf(msg, new_msg);
if(msg == new_msg)
{
msg_argmin = label;
}
}
__syncthreads();
/////////////////
}
else
{
//TRUNC SPEED UP VERSION ///////////////////////////////////
for(unsigned int s=blockDim.y / 2; s > 0; s>>=1)
{
if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C)
{
//min parallel reduction
float min_val = sdata[tid];
float min_label = sdata[tid + shared_mem_offset];
if(sdata[tid + s] <= sdata[tid])
{
min_val = sdata[tid + s];
min_label = sdata[shared_mem_offset + tid + s];
}
//min val parallel reduction
sdata[tid] = min_val;
//argmin parallel reduction
sdata[shared_mem_offset + tid] = min_label;
}
__syncthreads();
}
float min_h = sdata[threadIdx.x * blockDim.y];
int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y];
__syncthreads();
msg = min_h + jump(0, direction, 0, jump.size3 - 1) * edgeWeight;
msg_argmin = static_cast<int>(argmin_h);
sdata[tid] = h;
__syncthreads();
for(unsigned short label = start; label < stop + 1; ++label)
{
// compute min in local var to avoid global mem writes
float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump(0,direction,label, c) * edgeWeight;
if(new_msg <= msg)
{
msg = new_msg;
msg_argmin = label;
}
}
__syncthreads();
/////////////////////////////
}
// if(x == 2 && y == 0 && direction == DOWN)
// {
// printf("argmin : %i argmin h %i min_val %f min_h %f h: %f \n", msg_argmin, argmin_h, msg, min_h, h);
// }
// compute normalization with 2nd reduction
sdata[tid] = (float)exp((double)msg);
__syncthreads();
for(unsigned int s=blockDim.y / 2; s > 0; s>>=1)
{
if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// normalize message
double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45);
float logSumExp = (float)log(sum_exp);
// if(sum_exp < 1e-10)
// {
// printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg);
// }
//float logSumExp = 0.0;
if(direction == RIGHT)
{
messages(n, LEFT, y, x+1, c) = msg - logSumExp;
messages_argmin(n, LEFT, y, x+1, c) = msg_argmin;
message_scale(n, LEFT, y, x+1) = sum_exp;
}
if(direction == LEFT)
{
messages(n, RIGHT, y, x-1, c) = msg - logSumExp;
messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin;
message_scale(n, RIGHT, y, x-1) = sum_exp;
}
if(direction == UP)
{
messages(n, DOWN, y-1, x, c) = msg - logSumExp;
messages_argmin(n, DOWN, y-1, x, c) = msg_argmin;
message_scale(n, DOWN, y-1, x) = sum_exp;
}
if(direction == DOWN)
{
messages(n, UP, y+1, x, c) = msg - logSumExp;
messages_argmin(n, UP, y+1, x, c) = msg_argmin;
message_scale(n, UP, y+1, x) = sum_exp;
}
}
__global__ void lbp_cuda_backward_kernel_reduction_min_sum(
KernelData cost,
KernelData jump,
KernelData edges,
KernelData5 messages,
KernelData5 messages_argmin,
KernelData message_scale,
KernelData5 in_grad,
KernelData gradient_unary,
KernelData gradient_pairwise,
KernelData gradient_edge,
KernelData gradient_accumulation,
KernelData gradient_accumulation_tmp,
KernelData5 saved_prev_grad_msg,
const unsigned short x_in,
const unsigned short direction,
bool compute_cross,
const unsigned int n)
{
//initialize utility variables
unsigned short y = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y;
//unsigned int n = 0;
unsigned int x;
if(direction == UP || direction == DOWN)
{
x = y;
y = x_in;
}
else
{
x = x_in;
}
// shared memory h
extern __shared__ float sdata[];
// message size is N x 4 x H x W x C
// cost size is N x H x W x C
// edges: N x 1 x H x W
// jumps: 1 x 1 x H x W
const short N = cost.size0;
const short H = cost.size1;
const short W = cost.size2;
const short C = cost.size3;
const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x;
const float max_float = 1e15;
// check inside image
if(c >= C || x >= W || y >= H)
{
// write large number that will never win
sdata[tid] = max_float;
return;
}
//calc backward message
short prev_row_shift = 0;
short prev_col_shift = 0;
if(direction == LEFT)
{
prev_row_shift = 0;
prev_col_shift = 1;
}
if(direction == RIGHT)
{
prev_row_shift = 0;
prev_col_shift = -1;
}
if(direction == DOWN)
{
prev_row_shift = -1;
prev_col_shift = 0;
}
if(direction == UP)
{
prev_row_shift = 1;
prev_col_shift = 0;
}
int grad_xy_idx = 0;
if(direction == UP)
{
grad_xy_idx = DOWN;
}
if(direction == DOWN)
{
grad_xy_idx = UP;
}
if(direction == LEFT)
{
grad_xy_idx = RIGHT;
}
if(direction == RIGHT)
{
grad_xy_idx = LEFT;
}
float edgeWeight = edges(n, grad_xy_idx, y, x);
int HOR_IDX = 0;
int UP_IDX = 1;
int DOWN_IDX = 2;
///////////////////////in_grad normalization ////////////////////////////////////////////
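    // Editor's note (added comment): the forward pass stored messages as
    // msg_c - log(sum_k exp(msg_k)) and cached the sum in message_scale. The loop
    // below therefore applies the log-softmax vector-Jacobian product: the factor
    // for entry c is delta(c == label) - exp(msg_c) / sum_exp, i.e. the incoming
    // gradient minus its softmax-weighted total.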
float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift));
float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift);
sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c);
__syncthreads();
float in_grad_normalized = 0.0;
// normalization
for(unsigned short label = 0; label < C; ++label)
{
float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val);
if(c == label)
{
J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val);
}
//printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val);
//in_grad is in sdata
in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor;
}
__syncthreads();
///////////////////////acc normalization ////////////////////////////////////////////
sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX);
__syncthreads();
float acc_normalized = 0.0;
// normalization
for(unsigned short label = 0; label < C; ++label)
{
float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val);
if(c == label)
{
J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val);
}
//in_grad is in sdata
acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor;
}
__syncthreads();
/////////////////////////////////
int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c);
float additive_hor = in_grad_normalized + acc_normalized;
float additive_up = 0.0;
float additive_down = 0.0;
if(compute_cross)
{
additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX);
additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX);
}
// so that gradient_acc is not changed before assigning
__syncthreads();
//unary gradient
atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor);
atomicAdd(&gradient_unary(n, y, x, min_index), additive_up);
atomicAdd(&gradient_unary(n, y, x, min_index), additive_down);
//pairwise gradient
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_hor);
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_up);
atomicAdd(&gradient_pairwise(0, grad_xy_idx, min_index, c), edgeWeight * additive_down);
//edge gradient
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_hor);
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_up);
atomicAdd(&gradient_edge(0, grad_xy_idx, y, x), jump(0, grad_xy_idx, min_index, c) * additive_down);
updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX);
updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX);
updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX);
__syncthreads();
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX);
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX);
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX);
__syncthreads();
saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX);
}
// ============================================================================
// CPP KERNEL CALLS
// ============================================================================
namespace cuda
{
std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta)
{
int N = cost.size(0);
int H = cost.size(1);
int W = cost.size(2);
int C = cost.size(3);
//int max_iter = 2;
auto options = at::TensorOptions(cost.options());
// at::Tensor messages = at::zeros({N, 4, H, W, C}, options);
at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options);
at::Tensor message_scale = at::zeros({N, 4, H, W}, options);
//cost = cost.permute({0, 2, 3, 1}).contiguous();
// parallelize over image rows and disparities
// block-size in disparity dimension must be >= number of disparities
// then all the synchronization can be done over blocks (fast)
// otherwise global synchronization is necessary
int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f));
int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f));
// attention: 1024 is maximal number of threads per block!!
const dim3 blockSize(blockDimHW, blockDimC);
const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)),
std::ceil(C / static_cast<float>(blockSize.y)));
const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)),
std::ceil(C / static_cast<float>(blockSize.y)));
if(numBlocksLR.y != 1)
{
std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1=:" << numBlocksLR.y << "C=" << C << std::endl;
}
const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z;
// to Right
for(unsigned short x = 0; x < W - 1; ++x)
{
// compute min messages
lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta);
cudaSafeCall(cudaGetLastError());
}
// to LEFT
for(unsigned short x = W - 1; x > 0; --x)
{
// compute min messages
lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta);
cudaSafeCall(cudaGetLastError());
}
// to DOWN
for(unsigned short y = 0; y < H - 1; ++y)
{
// compute min messages
lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta);
cudaSafeCall(cudaGetLastError());
}
// to UP
for(unsigned short y = H - 1; y > 0; --y)
{
// compute min messages
lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta);
cudaSafeCall(cudaGetLastError());
}
//auto beliefs = messages.sum({1}) + cost;
std::vector<at::Tensor> output_vec;
output_vec.push_back(messages);
output_vec.push_back(messages_argmin);
output_vec.push_back(message_scale);
return output_vec;
}
std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost,
at::Tensor jump,
at::Tensor edge,
at::Tensor messages, unsigned short delta)
{
return lbp_reduction_min_sum(cost, jump, edge, messages, delta);
}
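// Editor's note: hedged usage sketch, not part of the original source. It shows
// how the outputs of lbp_forward_min_sum could be turned into per-pixel beliefs
// and a hard labeling, following the commented-out line in lbp_reduction_min_sum
// above (beliefs = messages.sum({1}) + cost). The helper name lbp_beliefs_sketch
// is an assumption introduced here, not an existing API.
inline at::Tensor lbp_beliefs_sketch(const at::Tensor& cost, const at::Tensor& messages)
{
    // messages: N x 4 x H x W x C, cost: N x H x W x C
    at::Tensor beliefs = messages.sum({1}) + cost; // aggregate incoming messages
    // min-sum convention: the label with the smallest belief wins at each pixel
    return beliefs.argmin(-1); // N x H x W labeling
}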
//=============================================================================
// BACKWARD
//=============================================================================
std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost,
at::Tensor jump,
at::Tensor edge,
at::Tensor in_grad,
at::Tensor messages,
at::Tensor messages_argmin,
at::Tensor message_scale)
{
int N = cost.size(0);
int H = cost.size(1);
int W = cost.size(2);
int C = cost.size(3);
auto options = at::TensorOptions(cost.options());
at::Tensor gradient_unary = at::zeros({N, H, W, C}, options);
at::Tensor gradient_pairwise = at::zeros({N, 4, C, C}, options);
at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options);
at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options);
gradient_messages += in_grad;
at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options);
at::Tensor gradient_accumulation;
// parallelize over image rows and disparities
// block-size in disparity dimension must be >= number of disparities
// then all the synchronization can be done over blocks (fast)
// otherwise global synchronization is necessary
int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f));
int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f));
// attention: 1024 is maximal number of threads per block!!
const dim3 blockSize(blockDimHW, blockDimC);
const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)),
std::ceil(C / static_cast<float>(blockSize.y)));
const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)),
std::ceil(C / static_cast<float>(blockSize.y)));
//printf("blockDimC %i \n", blockDimC);
//printf("blockDimHW %i \n", blockDimHW);
if(numBlocksLR.y != 1)
std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1: " << numBlocksLR.y << std::endl;
const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z;
const float max_float = 1e15;
for(int n = 0; n < N; ++n)
{
////////////////////UNARY GRADIENT////////////////////////////
//to DOWN
gradient_accumulation = at::zeros({N, W, 3, C}, options);
for(short y = 1; y < H; ++y)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options);
lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n);
cudaSafeCall(cudaGetLastError());
}
// to UP
gradient_accumulation = at::zeros({N, W, 3, C}, options);
for(short y = H - 2; y >= 0; --y)
{
// compute min messages
at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options);
lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n);
cudaSafeCall(cudaGetLastError());
}
// to LEFT
gradient_accumulation = at::zeros({N, H, 3, C}, options);
for(short x = W-2; x >= 0; --x)
{
// compute min messages
            at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); // match the {N, H, 3, C} accumulator used for the horizontal sweeps
lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n);
cudaSafeCall(cudaGetLastError());
}
// to RIGHT
gradient_accumulation = at::zeros({N, H, 3, C}, options);
for(short x = 1; x < W; ++x)
{
// compute min messages
            at::Tensor gradient_accumulation_tmp = at::zeros({N, H, 3, C}, options); // match the {N, H, 3, C} accumulator used for the horizontal sweeps
lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n);
cudaSafeCall(cudaGetLastError());
}
}
std::vector<at::Tensor> output_vec;
output_vec.push_back(gradient_unary);
output_vec.push_back(gradient_pairwise);
output_vec.push_back(gradient_edge);
output_vec.push_back(gradient_messages);
return output_vec;
}
} |
ddd3c7219fbc304ddd2cca7fe86a46ac004ea50f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <math.h>
#include <cstdio>
#include <sstream>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
namespace {
// A little structure for holding details about a pixel.
struct Pix {
float z; // Depth of the reference point.
int32_t idx; // Index of the reference point.
float dist2; // Euclidean distance square to the reference point.
};
__device__ inline bool operator<(const Pix& a, const Pix& b) {
return a.z < b.z;
}
// This function checks if a pixel given by xy location pxy lies within the
// point with index p and batch index n. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the points which intersect
// with this pixel sorted by closest z distance. If the pixel pxy lies in the
// point, the list (q) is updated and re-ordered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
const float* points, // (P, 3)
const int p_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
PointQ& q,
const float* radius,
const float xf,
const float yf,
const int K) {
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
const float p_radius = radius[p_idx];
const float radius2 = p_radius * p_radius;
if (pz < 0)
return; // Don't render points behind the camera
const float dx = xf - px;
const float dy = yf - py;
const float dist2 = dx * dx + dy * dy;
if (dist2 < radius2) {
if (q_size < K) {
// Just insert it
q[q_size] = {pz, p_idx, dist2};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max
q[q_max_idx] = {pz, p_idx, dist2};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsNaiveCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float* radius,
const int N,
const int H,
const int W,
const int K,
int32_t* point_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists) { // (N, H, W, K)
// Simple version: One thread per output pixel
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < N * H * W; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (H * W); // Batch index
const int pix_idx = i % (H * W);
// Reverse ordering of the X and Y axis as the camera coordinates
// assume that +Y is pointing up and +X is pointing left.
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
    // screen coordinates to NDC coordinates of the pixel.
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
// TODO(jcjohns) Abstract this out into a standalone data structure
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t point_start_idx = cloud_to_packed_first_idx[n];
const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n];
for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) {
CheckPixelInsidePoint(
points, p_idx, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K);
}
BubbleSort(q, q_size);
int idx = n * H * W * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist2;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsNaiveCuda(
    const at::Tensor& points, // (P, 3)
const at::Tensor& cloud_to_packed_first_idx, // (N)
const at::Tensor& num_points_per_cloud, // (N)
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int points_per_pixel) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
cloud_to_packed_first_idx_t{
cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
at::CheckedFrom c = "RasterizePointsNaiveCuda";
at::checkAllSameGPU(
c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(
points.ndimension() == 2 && points.size(1) == 3,
"points must have dimensions (num_points, 3)");
TORCH_CHECK(
num_points_per_cloud.size(0) == cloud_to_packed_first_idx.size(0),
"num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx");
const int N = num_points_per_cloud.size(0); // batch size.
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
auto int_opts = num_points_per_cloud.options().dtype(at::kInt);
auto float_opts = points.options().dtype(at::kFloat);
at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
if (point_idxs.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_points_per_cloud.contiguous().data_ptr<int64_t>(),
radius.contiguous().data_ptr<float>(),
N,
H,
W,
K,
point_idxs.contiguous().data_ptr<int32_t>(),
zbuf.contiguous().data_ptr<float>(),
pix_dists.contiguous().data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
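// Editor's note: hedged usage sketch, not part of the original source. It builds a
// tiny single-cloud input on the GPU and calls RasterizePointsNaiveCuda above; the
// sizes (P, image_size, points_per_pixel) and the helper name are illustrative
// assumptions only.
inline at::Tensor RasterizePointsNaiveExample() {
  const int64_t P = 16;
  auto float_opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto long_opts = at::TensorOptions().dtype(at::kLong).device(at::kCUDA);
  at::Tensor points = at::rand({P, 3}, float_opts) * 2.0f - 1.0f; // NDC-ish xyz
  at::Tensor first_idx = at::zeros({1}, long_opts); // one cloud, packed offset 0
  at::Tensor num_points = at::full({1}, P, long_opts); // with P points
  at::Tensor radius = at::full({P}, 0.05f, float_opts); // per-point radius
  auto out = RasterizePointsNaiveCuda(
      points, first_idx, num_points, std::make_tuple(32, 32), radius, /*points_per_pixel=*/8);
  return std::get<0>(out); // (1, 32, 32, 8) point indices per pixel
}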
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsCoarseCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float* radius,
const int N,
const int P,
const int H,
const int W,
const int bin_size,
const int chunk_size,
const int max_points_per_bin,
int* points_per_bin,
int* bin_points) {
extern __shared__ char sbuf[];
const int M = max_points_per_bin;
// Integer divide round up
const int num_bins_x = 1 + (W - 1) / bin_size;
const int num_bins_y = 1 + (H - 1) / bin_size;
// NDC range depends on the ratio of W/H
// The shorter side from (H, W) is given an NDC range of 2.0 and
// the other side is scaled by the ratio of H:W.
const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
// Size of half a pixel in NDC units is the NDC half range
// divided by the corresponding image dimension
const float half_pix_x = NDC_x_half_range / W;
const float half_pix_y = NDC_y_half_range / H;
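  // Editor's note (added comment): worked example, assuming NonSquareNdcRange
  // scales the longer side as described above. For H = 64, W = 128 the shorter
  // side keeps range 2.0, so NDC_y_half_range = 1.0 and half_pix_y = 1.0 / 64,
  // while the longer side gets 2.0 * (128 / 64) = 4.0, so NDC_x_half_range = 2.0
  // and half_pix_x = 2.0 / 128.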
// This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
// stored in shared memory that will track whether each point in the chunk
// falls into each bin of the image.
BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
// Have each block handle a chunk of points and build a 3D bitmask in
// shared memory to mark which points hit which bins. In this first phase,
// each thread processes one point at a time. After processing the chunk,
// one thread is assigned per bin, and the thread counts and writes the
// points for the bin out to global memory.
const int chunks_per_batch = 1 + (P - 1) / chunk_size;
const int num_chunks = N * chunks_per_batch;
for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
const int batch_idx = chunk / chunks_per_batch;
const int chunk_idx = chunk % chunks_per_batch;
const int point_start_idx = chunk_idx * chunk_size;
binmask.block_clear();
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx];
const int64_t cloud_point_stop_idx =
cloud_point_start_idx + num_points_per_cloud[batch_idx];
// Have each thread handle a different point within the chunk
for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
const int p_idx = point_start_idx + p;
// Check if point index corresponds to the cloud in the batch given by
// batch_idx.
if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) {
continue;
}
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
const float p_radius = radius[p_idx];
if (pz < 0)
continue; // Don't render points behind the camera.
const float px0 = px - p_radius;
const float px1 = px + p_radius;
const float py0 = py - p_radius;
const float py1 = py + p_radius;
// Brute-force search over all bins; TODO something smarter?
// For example we could compute the exact bin where the point falls,
// then check neighboring bins. This way we wouldn't have to check
// all bins (however then we might have more warp divergence?)
for (int by = 0; by < num_bins_y; ++by) {
// Get y extent for the bin. PixToNonSquareNdc gives us the location of
// the center of each pixel, so we need to add/subtract a half
// pixel to get the true extent of the bin.
const float by0 = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
const float by1 =
PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
const bool y_overlap = (py0 <= by1) && (by0 <= py1);
if (!y_overlap) {
continue;
}
for (int bx = 0; bx < num_bins_x; ++bx) {
// Get x extent for the bin; again we need to adjust the
// output of PixToNonSquareNdc by half a pixel.
const float bx0 = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
const float bx1 =
PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
const bool x_overlap = (px0 <= bx1) && (bx0 <= px1);
if (x_overlap) {
binmask.set(by, bx, p);
}
}
}
}
__syncthreads();
// Now we have processed every point in the current chunk. We need to
// count the number of points in each bin so we can write the indices
// out to global memory. We have each thread handle a different bin.
for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
byx += blockDim.x) {
const int by = byx / num_bins_x;
const int bx = byx % num_bins_x;
const int count = binmask.count(by, bx);
const int points_per_bin_idx =
batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
// This atomically increments the (global) number of points found
// in the current bin, and gets the previous value of the counter;
// this effectively allocates space in the bin_points array for the
// points in the current chunk that fall into this bin.
const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);
// Now loop over the binmask and write the active bits for this bin
// out to bin_points.
int next_idx = batch_idx * num_bins_y * num_bins_x * M +
by * num_bins_x * M + bx * M + start;
for (int p = 0; p < chunk_size; ++p) {
if (binmask.get(by, bx, p)) {
// TODO: Throw an error if next_idx >= M -- this means that
// we got more than max_points_per_bin in this bin
// TODO: check if atomicAdd is needed in line 265.
bin_points[next_idx] = point_start_idx + p;
next_idx++;
}
}
}
__syncthreads();
}
}
at::Tensor RasterizePointsCoarseCuda(
const at::Tensor& points, // (P, 3)
const at::Tensor& cloud_to_packed_first_idx, // (N)
const at::Tensor& num_points_per_cloud, // (N)
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int bin_size,
const int max_points_per_bin) {
TORCH_CHECK(
points.ndimension() == 2 && points.size(1) == 3,
"points must have dimensions (num_points, 3)");
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
cloud_to_packed_first_idx_t{
cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
at::CheckedFrom c = "RasterizePointsCoarseCuda";
at::checkAllSameGPU(
c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int P = points.size(0);
const int N = num_points_per_cloud.size(0);
const int M = max_points_per_bin;
// Integer divide round up.
const int num_bins_y = 1 + (H - 1) / bin_size;
const int num_bins_x = 1 + (W - 1) / bin_size;
if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
// Make sure we do not use too much shared memory.
std::stringstream ss;
ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y
<< ", num_bins_x: " << num_bins_x << ", "
<< "; that's too many!";
AT_ERROR(ss.str());
}
auto opts = num_points_per_cloud.options().dtype(at::kInt);
at::Tensor points_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts);
at::Tensor bin_points = at::full({N, num_bins_y, num_bins_x, M}, -1, opts);
if (bin_points.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return bin_points;
}
const int chunk_size = 512;
const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
const size_t blocks = 64;
const size_t threads = 512;
hipLaunchKernelGGL(( RasterizePointsCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream,
points.contiguous().data_ptr<float>(),
cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_points_per_cloud.contiguous().data_ptr<int64_t>(),
radius.contiguous().data_ptr<float>(),
N,
P,
H,
W,
bin_size,
chunk_size,
M,
points_per_bin.contiguous().data_ptr<int32_t>(),
bin_points.contiguous().data_ptr<int32_t>());
AT_CUDA_CHECK(hipGetLastError());
return bin_points;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsFineCudaKernel(
const float* points, // (P, 3)
const int32_t* bin_points, // (N, BH, BW, T)
const float* radius,
const int bin_size,
const int N,
const int BH, // num_bins y
const int BW, // num_bins x
const int M,
const int H,
const int W,
const int K,
int32_t* point_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists) { // (N, H, W, K)
// This can be more than H * W if H or W are not divisible by bin_size.
const int num_pixels = N * BH * BW * bin_size * bin_size;
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from points and bin_points.
int i = pid;
const int n = i / (BH * BW * bin_size * bin_size);
i %= BH * BW * bin_size * bin_size;
const int by = i / (BW * bin_size * bin_size);
i %= BW * bin_size * bin_size;
const int bx = i / (bin_size * bin_size);
i %= bin_size * bin_size;
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
if (yi >= H || xi >= W)
continue;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
// This part looks like the naive rasterization kernel, except we use
// bin_points to only look at a subset of points already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; ++m) {
const int p = bin_points[n * BH * BW * M + by * BW * M + bx * M + m];
if (p < 0) {
        // bin_points uses -1 as a sentinel value
continue;
}
CheckPixelInsidePoint(
points, p, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K);
}
// Now we've looked at all the points for this bin, so we can write
// output for the current pixel.
BubbleSort(q, q_size);
// Reverse ordering of the X and Y axis as the camera coordinates
// assume that +Y is pointing up and +X is pointing left.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist2;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsFineCuda(
const at::Tensor& points, // (P, 3)
const at::Tensor& bin_points,
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int bin_size,
const int points_per_pixel) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
bin_points_t{bin_points, "bin_points", 2};
at::CheckedFrom c = "RasterizePointsFineCuda";
at::checkAllSameGPU(c, {points_t, bin_points_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int N = bin_points.size(0);
const int BH = bin_points.size(1);
const int BW = bin_points.size(2);
const int M = bin_points.size(3);
const int K = points_per_pixel;
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 150");
}
auto int_opts = bin_points.options().dtype(at::kInt);
auto float_opts = points.options().dtype(at::kFloat);
at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
if (point_idxs.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsFineCudaKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
bin_points.contiguous().data_ptr<int32_t>(),
radius.contiguous().data_ptr<float>(),
bin_size,
N,
BH,
BW,
M,
H,
W,
K,
point_idxs.contiguous().data_ptr<int32_t>(),
zbuf.contiguous().data_ptr<float>(),
pix_dists.contiguous().data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
const float* points, // (P, 3)
const int32_t* idxs, // (N, H, W, K)
const int N,
const int P,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_dists, // (N, H, W, K)
float* grad_points) { // (P, 3)
// Parallelized over each of K points per pixel, for each pixel in images of
// size H * W, for each image in the batch of size N.
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N * H * W * K; i += num_threads) {
// const int n = i / (H * W * K); // batch index (not needed).
const int yxk = i % (H * W * K);
const int yi = yxk / (W * K);
const int xk = yxk % (W * K);
const int xi = xk / K;
// k = xk % K (We don't actually need k, but this would be it.)
// Reverse ordering of X and Y axes.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const float xf = PixToNonSquareNdc(xidx, W, H);
const float yf = PixToNonSquareNdc(yidx, H, W);
const int p = idxs[i];
if (p < 0)
continue;
const float grad_dist2 = grad_dists[i];
const int p_ind = p * 3; // index into packed points tensor
const float px = points[p_ind + 0];
const float py = points[p_ind + 1];
const float dx = px - xf;
const float dy = py - yf;
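    // Editor's note (added comment): since dist2 = dx * dx + dy * dy with
    // dx = px - xf and dy = py - yf, the partials are d(dist2)/d(px) = 2 * dx and
    // d(dist2)/d(py) = 2 * dy; the two lines below apply them to the incoming
    // grad_dists value, while grad_zbuf passes straight through to the z coordinate.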
const float grad_px = 2.0f * grad_dist2 * dx;
const float grad_py = 2.0f * grad_dist2 * dy;
const float grad_pz = grad_zbuf[i];
atomicAdd(grad_points + p_ind + 0, grad_px);
atomicAdd(grad_points + p_ind + 1, grad_py);
atomicAdd(grad_points + p_ind + 2, grad_pz);
}
}
at::Tensor RasterizePointsBackwardCuda(
    const at::Tensor& points, // (P, 3)
const at::Tensor& idxs, // (N, H, W, K)
const at::Tensor& grad_zbuf, // (N, H, W, K)
const at::Tensor& grad_dists) { // (N, H, W, K)
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, idxs_t{idxs, "idxs", 2},
grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "RasterizePointsBackwardCuda";
at::checkAllSameGPU(c, {points_t, idxs_t, grad_zbuf_t, grad_dists_t});
at::checkAllSameType(c, {points_t, grad_zbuf_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int P = points.size(0);
const int N = idxs.size(0);
const int H = idxs.size(1);
const int W = idxs.size(2);
const int K = idxs.size(3);
at::Tensor grad_points = at::zeros({P, 3}, points.options());
if (grad_points.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_points;
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizePointsBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
idxs.contiguous().data_ptr<int32_t>(),
N,
P,
H,
W,
K,
grad_zbuf.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.contiguous().data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return grad_points;
}
| ddd3c7219fbc304ddd2cca7fe86a46ac004ea50f.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <math.h>
#include <cstdio>
#include <sstream>
#include <tuple>
#include "rasterize_points/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
namespace {
// A little structure for holding details about a pixel.
struct Pix {
float z; // Depth of the reference point.
int32_t idx; // Index of the reference point.
float dist2; // Euclidean distance square to the reference point.
};
__device__ inline bool operator<(const Pix& a, const Pix& b) {
return a.z < b.z;
}
// This function checks if a pixel given by xy location pxy lies within the
// point with index p and batch index n. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the points which intersect
// with this pixel sorted by closest z distance. If the pixel pxy lies in the
// point, the list (q) is updated and re-ordered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizePointsNaiveCudaKernel and
// RasterizePointsFineCudaKernel.
template <typename PointQ>
__device__ void CheckPixelInsidePoint(
const float* points, // (P, 3)
const int p_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
PointQ& q,
const float* radius,
const float xf,
const float yf,
const int K) {
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
const float p_radius = radius[p_idx];
const float radius2 = p_radius * p_radius;
if (pz < 0)
return; // Don't render points behind the camera
const float dx = xf - px;
const float dy = yf - py;
const float dist2 = dx * dx + dy * dy;
if (dist2 < radius2) {
if (q_size < K) {
// Just insert it
q[q_size] = {pz, p_idx, dist2};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max
q[q_max_idx] = {pz, p_idx, dist2};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
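// Editor's note (added comment): a small worked example of the K-buffer logic
// above, assuming K = 2. Inserting depths 0.9 and 0.5 fills q with q_max_z = 0.9
// at q_max_idx = 0; a later hit at depth 0.7 overwrites that slot (0.7 < 0.9) and
// the rescan leaves q_max_z = 0.7, so only the two closest points survive.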
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsNaiveCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float* radius,
const int N,
const int H,
const int W,
const int K,
int32_t* point_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists) { // (N, H, W, K)
// Simple version: One thread per output pixel
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < N * H * W; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (H * W); // Batch index
const int pix_idx = i % (H * W);
// Reverse ordering of the X and Y axis as the camera coordinates
// assume that +Y is pointing up and +X is pointing left.
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
    // screen coordinates to NDC coordinates of the pixel.
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
// TODO(jcjohns) Abstract this out into a standalone data structure
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t point_start_idx = cloud_to_packed_first_idx[n];
const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n];
for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) {
CheckPixelInsidePoint(
points, p_idx, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K);
}
BubbleSort(q, q_size);
int idx = n * H * W * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist2;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsNaiveCuda(
    const at::Tensor& points, // (P, 3)
const at::Tensor& cloud_to_packed_first_idx, // (N)
const at::Tensor& num_points_per_cloud, // (N)
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int points_per_pixel) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
cloud_to_packed_first_idx_t{
cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
at::CheckedFrom c = "RasterizePointsNaiveCuda";
at::checkAllSameGPU(
c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(
points.ndimension() == 2 && points.size(1) == 3,
"points must have dimensions (num_points, 3)");
TORCH_CHECK(
num_points_per_cloud.size(0) == cloud_to_packed_first_idx.size(0),
"num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx");
const int N = num_points_per_cloud.size(0); // batch size.
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int K = points_per_pixel;
if (K > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
auto int_opts = num_points_per_cloud.options().dtype(at::kInt);
auto float_opts = points.options().dtype(at::kFloat);
at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
if (point_idxs.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsNaiveCudaKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_points_per_cloud.contiguous().data_ptr<int64_t>(),
radius.contiguous().data_ptr<float>(),
N,
H,
W,
K,
point_idxs.contiguous().data_ptr<int32_t>(),
zbuf.contiguous().data_ptr<float>(),
pix_dists.contiguous().data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * COARSE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsCoarseCudaKernel(
const float* points, // (P, 3)
const int64_t* cloud_to_packed_first_idx, // (N)
const int64_t* num_points_per_cloud, // (N)
const float* radius,
const int N,
const int P,
const int H,
const int W,
const int bin_size,
const int chunk_size,
const int max_points_per_bin,
int* points_per_bin,
int* bin_points) {
extern __shared__ char sbuf[];
const int M = max_points_per_bin;
// Integer divide round up
const int num_bins_x = 1 + (W - 1) / bin_size;
const int num_bins_y = 1 + (H - 1) / bin_size;
// NDC range depends on the ratio of W/H
// The shorter side from (H, W) is given an NDC range of 2.0 and
// the other side is scaled by the ratio of H:W.
const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
// Size of half a pixel in NDC units is the NDC half range
// divided by the corresponding image dimension
const float half_pix_x = NDC_x_half_range / W;
const float half_pix_y = NDC_y_half_range / H;
// This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
// stored in shared memory that will track whether each point in the chunk
// falls into each bin of the image.
BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
// Have each block handle a chunk of points and build a 3D bitmask in
// shared memory to mark which points hit which bins. In this first phase,
// each thread processes one point at a time. After processing the chunk,
// one thread is assigned per bin, and the thread counts and writes the
// points for the bin out to global memory.
const int chunks_per_batch = 1 + (P - 1) / chunk_size;
const int num_chunks = N * chunks_per_batch;
for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
const int batch_idx = chunk / chunks_per_batch;
const int chunk_idx = chunk % chunks_per_batch;
const int point_start_idx = chunk_idx * chunk_size;
binmask.block_clear();
// Using the batch index of the thread get the start and stop
// indices for the points.
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx];
const int64_t cloud_point_stop_idx =
cloud_point_start_idx + num_points_per_cloud[batch_idx];
// Have each thread handle a different point within the chunk
for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) {
const int p_idx = point_start_idx + p;
// Check if point index corresponds to the cloud in the batch given by
// batch_idx.
if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) {
continue;
}
const float px = points[p_idx * 3 + 0];
const float py = points[p_idx * 3 + 1];
const float pz = points[p_idx * 3 + 2];
const float p_radius = radius[p_idx];
if (pz < 0)
continue; // Don't render points behind the camera.
const float px0 = px - p_radius;
const float px1 = px + p_radius;
const float py0 = py - p_radius;
const float py1 = py + p_radius;
// Brute-force search over all bins; TODO something smarter?
// For example we could compute the exact bin where the point falls,
// then check neighboring bins. This way we wouldn't have to check
// all bins (however then we might have more warp divergence?)
for (int by = 0; by < num_bins_y; ++by) {
// Get y extent for the bin. PixToNonSquareNdc gives us the location of
// the center of each pixel, so we need to add/subtract a half
// pixel to get the true extent of the bin.
const float by0 = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
const float by1 =
PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
const bool y_overlap = (py0 <= by1) && (by0 <= py1);
if (!y_overlap) {
continue;
}
for (int bx = 0; bx < num_bins_x; ++bx) {
// Get x extent for the bin; again we need to adjust the
// output of PixToNonSquareNdc by half a pixel.
const float bx0 = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
const float bx1 =
PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
const bool x_overlap = (px0 <= bx1) && (bx0 <= px1);
if (x_overlap) {
binmask.set(by, bx, p);
}
}
}
}
__syncthreads();
// Now we have processed every point in the current chunk. We need to
// count the number of points in each bin so we can write the indices
// out to global memory. We have each thread handle a different bin.
for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
byx += blockDim.x) {
const int by = byx / num_bins_x;
const int bx = byx % num_bins_x;
const int count = binmask.count(by, bx);
const int points_per_bin_idx =
batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
// This atomically increments the (global) number of points found
// in the current bin, and gets the previous value of the counter;
// this effectively allocates space in the bin_points array for the
// points in the current chunk that fall into this bin.
const int start = atomicAdd(points_per_bin + points_per_bin_idx, count);
// Now loop over the binmask and write the active bits for this bin
// out to bin_points.
int next_idx = batch_idx * num_bins_y * num_bins_x * M +
by * num_bins_x * M + bx * M + start;
for (int p = 0; p < chunk_size; ++p) {
if (binmask.get(by, bx, p)) {
// TODO: Throw an error if next_idx >= M -- this means that
// we got more than max_points_per_bin in this bin
// TODO: check if atomicAdd is needed in line 265.
bin_points[next_idx] = point_start_idx + p;
next_idx++;
}
}
}
__syncthreads();
}
}
at::Tensor RasterizePointsCoarseCuda(
const at::Tensor& points, // (P, 3)
const at::Tensor& cloud_to_packed_first_idx, // (N)
const at::Tensor& num_points_per_cloud, // (N)
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int bin_size,
const int max_points_per_bin) {
TORCH_CHECK(
points.ndimension() == 2 && points.size(1) == 3,
"points must have dimensions (num_points, 3)");
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
cloud_to_packed_first_idx_t{
cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
at::CheckedFrom c = "RasterizePointsCoarseCuda";
at::checkAllSameGPU(
c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int P = points.size(0);
const int N = num_points_per_cloud.size(0);
const int M = max_points_per_bin;
// Integer divide round up.
const int num_bins_y = 1 + (H - 1) / bin_size;
const int num_bins_x = 1 + (W - 1) / bin_size;
if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
// Make sure we do not use too much shared memory.
std::stringstream ss;
ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y
<< ", num_bins_x: " << num_bins_x << ", "
<< "; that's too many!";
AT_ERROR(ss.str());
}
auto opts = num_points_per_cloud.options().dtype(at::kInt);
at::Tensor points_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts);
at::Tensor bin_points = at::full({N, num_bins_y, num_bins_x, M}, -1, opts);
if (bin_points.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return bin_points;
}
const int chunk_size = 512;
const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
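  // Editor's note (added comment): the bitmask tracks one bit per
  // (bin_y, bin_x, chunk point) triple, so the shared allocation is
  // num_bins_y * num_bins_x * chunk_size bits -- hence the division by 8 above
  // to get bytes (assuming BitMask packs one bit per entry, as the cast to
  // unsigned int* in the kernel suggests).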
const size_t blocks = 64;
const size_t threads = 512;
RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>(
points.contiguous().data_ptr<float>(),
cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_points_per_cloud.contiguous().data_ptr<int64_t>(),
radius.contiguous().data_ptr<float>(),
N,
P,
H,
W,
bin_size,
chunk_size,
M,
points_per_bin.contiguous().data_ptr<int32_t>(),
bin_points.contiguous().data_ptr<int32_t>());
AT_CUDA_CHECK(cudaGetLastError());
return bin_points;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizePointsFineCudaKernel(
const float* points, // (P, 3)
const int32_t* bin_points, // (N, BH, BW, T)
const float* radius,
const int bin_size,
const int N,
const int BH, // num_bins y
const int BW, // num_bins x
const int M,
const int H,
const int W,
const int K,
int32_t* point_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists) { // (N, H, W, K)
// This can be more than H * W if H or W are not divisible by bin_size.
const int num_pixels = N * BH * BW * bin_size * bin_size;
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from points and bin_points.
int i = pid;
const int n = i / (BH * BW * bin_size * bin_size);
i %= BH * BW * bin_size * bin_size;
const int by = i / (BW * bin_size * bin_size);
i %= BW * bin_size * bin_size;
const int bx = i / (bin_size * bin_size);
i %= bin_size * bin_size;
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
if (yi >= H || xi >= W)
continue;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
// This part looks like the naive rasterization kernel, except we use
// bin_points to only look at a subset of points already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pix q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; ++m) {
const int p = bin_points[n * BH * BW * M + by * BW * M + bx * M + m];
if (p < 0) {
        // bin_points uses -1 as a sentinel value
continue;
}
CheckPixelInsidePoint(
points, p, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K);
}
// Now we've looked at all the points for this bin, so we can write
// output for the current pixel.
BubbleSort(q, q_size);
// Reverse ordering of the X and Y axis as the camera coordinates
// assume that +Y is pointing up and +X is pointing left.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
for (int k = 0; k < q_size; ++k) {
point_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist2;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsFineCuda(
const at::Tensor& points, // (P, 3)
const at::Tensor& bin_points,
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int bin_size,
const int points_per_pixel) {
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
bin_points_t{bin_points, "bin_points", 2};
at::CheckedFrom c = "RasterizePointsFineCuda";
at::checkAllSameGPU(c, {points_t, bin_points_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int N = bin_points.size(0);
const int BH = bin_points.size(1);
const int BW = bin_points.size(2);
const int M = bin_points.size(3);
const int K = points_per_pixel;
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 150");
}
auto int_opts = bin_points.options().dtype(at::kInt);
auto float_opts = points.options().dtype(at::kFloat);
at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
if (point_idxs.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsFineCudaKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
bin_points.contiguous().data_ptr<int32_t>(),
radius.contiguous().data_ptr<float>(),
bin_size,
N,
BH,
BW,
M,
H,
W,
K,
point_idxs.contiguous().data_ptr<int32_t>(),
zbuf.contiguous().data_ptr<float>(),
pix_dists.contiguous().data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(point_idxs, zbuf, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO(T55115174) Add more documentation for backward kernel.
__global__ void RasterizePointsBackwardCudaKernel(
const float* points, // (P, 3)
const int32_t* idxs, // (N, H, W, K)
const int N,
const int P,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_dists, // (N, H, W, K)
float* grad_points) { // (P, 3)
// Parallelized over each of K points per pixel, for each pixel in images of
// size H * W, for each image in the batch of size N.
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = tid; i < N * H * W * K; i += num_threads) {
// const int n = i / (H * W * K); // batch index (not needed).
const int yxk = i % (H * W * K);
const int yi = yxk / (W * K);
const int xk = yxk % (W * K);
const int xi = xk / K;
// k = xk % K (We don't actually need k, but this would be it.)
// Reverse ordering of X and Y axes.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const float xf = PixToNonSquareNdc(xidx, W, H);
const float yf = PixToNonSquareNdc(yidx, H, W);
const int p = idxs[i];
if (p < 0)
continue;
const float grad_dist2 = grad_dists[i];
const int p_ind = p * 3; // index into packed points tensor
const float px = points[p_ind + 0];
const float py = points[p_ind + 1];
const float dx = px - xf;
const float dy = py - yf;
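    // pix_dists stores dist2 = dx * dx + dy * dy, so by the chain rule
    // d(dist2)/d(px) = 2 * dx and d(dist2)/d(py) = 2 * dy.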
const float grad_px = 2.0f * grad_dist2 * dx;
const float grad_py = 2.0f * grad_dist2 * dy;
const float grad_pz = grad_zbuf[i];
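    // Several pixels may select the same point, so the gradients are
    // accumulated into the packed points tensor with atomicAdd.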
atomicAdd(grad_points + p_ind + 0, grad_px);
atomicAdd(grad_points + p_ind + 1, grad_py);
atomicAdd(grad_points + p_ind + 2, grad_pz);
}
}
at::Tensor RasterizePointsBackwardCuda(
const at::Tensor& points, // (N, P, 3)
const at::Tensor& idxs, // (N, H, W, K)
const at::Tensor& grad_zbuf, // (N, H, W, K)
const at::Tensor& grad_dists) { // (N, H, W, K)
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1}, idxs_t{idxs, "idxs", 2},
grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
grad_dists_t{grad_dists, "grad_dists", 4};
at::CheckedFrom c = "RasterizePointsBackwardCuda";
at::checkAllSameGPU(c, {points_t, idxs_t, grad_zbuf_t, grad_dists_t});
at::checkAllSameType(c, {points_t, grad_zbuf_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int P = points.size(0);
const int N = idxs.size(0);
const int H = idxs.size(1);
const int W = idxs.size(2);
const int K = idxs.size(3);
at::Tensor grad_points = at::zeros({P, 3}, points.options());
if (grad_points.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_points;
}
const size_t blocks = 1024;
const size_t threads = 64;
RasterizePointsBackwardCudaKernel<<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<float>(),
idxs.contiguous().data_ptr<int32_t>(),
N,
P,
H,
W,
K,
grad_zbuf.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_points.contiguous().data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return grad_points;
}
|
9d96b83f511b527a0214d5e15ede3ac133a47805.hip | // !!! This is a file automatically generated by hipify!!!
/*
Print all basic device informations you could need.
*/
#include <cstdio>
#include <hip/hip_runtime_api.h>
// To prevent calls from errors
#define CUDA_SAFE_CALL_NO_SYNC(x) \
do { \
hipError_t err = x; \
if (err != hipSuccess) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} \
} while(0)
void deviceInfo() {
int deviceCount;
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
for(int dev=0 ; dev < deviceCount ; dev++){
CUDA_SAFE_CALL_NO_SYNC( hipGetDeviceProperties(&deviceProp, dev) );
printf(" Device Number: %d\n" , dev);
printf(" Name: %s\n" , deviceProp.name);
//printf(" Unique identifier %lu\n" , deviceProp.uuid);
printf(" Total Global memory (bytes) %-lu\n" , deviceProp.totalGlobalMem);
printf(" Shared memory per block (bytes) %-10lu\n" , deviceProp.sharedMemPerBlock);
printf(" Maximum 32-bits registers per block %d\n" , deviceProp.regsPerBlock);
printf(" Warp size (threads) %d\n" , deviceProp.warpSize);
printf(" Max threads per block %d\n" , deviceProp.maxThreadsPerBlock);
printf(" Max threads in x dimension %d\n" , deviceProp.maxThreadsDim[0]);
printf(" Max threads in y dimension %d\n" , deviceProp.maxThreadsDim[1]);
printf(" Max threads in z dimension %d\n" , deviceProp.maxThreadsDim[2]);
printf(" Where x*y*z <= 1024\n" );
printf(" Max grid size x (block) %d\n" , deviceProp.maxGridSize[0]);
printf(" Max grid size y (block) %d\n" , deviceProp.maxGridSize[1]);
printf(" Max grid size z (block) %d\n" , deviceProp.maxGridSize[2]);
printf(" Clock rate (kHZ) %d\n" , deviceProp.clockRate);
printf(" Total constant memory (bytes) %lu\n" , deviceProp.totalConstMem);
printf(" Major %d\n" , deviceProp.major);
printf(" Minor %d\n" , deviceProp.minor);
printf(" Multiprocessor count %d\n" , deviceProp.multiProcessorCount);
printf(" Run time limit for kernel execution %d\n" , deviceProp.kernelExecTimeoutEnabled );
printf(" Integrated GPU (motherboard) %d\n" , deviceProp.integrated);
printf(" Mapping host mem into CUDA adress space %d\n" , deviceProp.canMapHostMemory);
printf(" Compute mode %d\n" , deviceProp.computeMode);
printf(" Memory Clock Rate (KHz): %d\n" , deviceProp.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n" , deviceProp.memoryBusWidth);
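    // Peak bandwidth (GB/s) = 2 (double data rate) * memoryClockRate [kHz] * (busWidth / 8) [bytes] / 1e6.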
printf(" Peak Memory Bandwidth (GB/s): %f\n" , 2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1.0e6); //Double rate memory explain the x2
printf(" L2 cache size (bytes) %d\n" , deviceProp.l2CacheSize);
printf(" Max resident threads per multiprocessor %d\n" , deviceProp.maxThreadsPerMultiProcessor);
printf(" Supports caching globals in L1 %d\n" , deviceProp.globalL1CacheSupported);
printf(" Supports caching locals in L1 %d\n" , deviceProp.localL1CacheSupported);
printf(" Shared memory per MP (bytes) %lu\n" , deviceProp.sharedMemPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
printf(" 32-bits registers per MP %d\n" , deviceProp.regsPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
}
}
int main(int argc, char *argv[]){
deviceInfo();
return 0;
}
| 9d96b83f511b527a0214d5e15ede3ac133a47805.cu | /*
Print all basic device informations you could need.
*/
#include <cstdio>
#include <cuda_runtime_api.h>
// To prevent calls from errors
#define CUDA_SAFE_CALL_NO_SYNC(x) \
do { \
cudaError_t err = x; \
if (err != cudaSuccess) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} \
} while(0)
void deviceInfo() {
int deviceCount;
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "There is no device.\n");
exit(EXIT_FAILURE);
}
for(int dev=0 ; dev < deviceCount ; dev++){
CUDA_SAFE_CALL_NO_SYNC( cudaGetDeviceProperties(&deviceProp, dev) );
printf(" Device Number: %d\n" , dev);
printf(" Name: %s\n" , deviceProp.name);
//printf(" Unique identifier %lu\n" , deviceProp.uuid);
printf(" Total Global memory (bytes) %-lu\n" , deviceProp.totalGlobalMem);
printf(" Shared memory per block (bytes) %-10lu\n" , deviceProp.sharedMemPerBlock);
printf(" Maximum 32-bits registers per block %d\n" , deviceProp.regsPerBlock);
printf(" Warp size (threads) %d\n" , deviceProp.warpSize);
printf(" Max threads per block %d\n" , deviceProp.maxThreadsPerBlock);
printf(" Max threads in x dimension %d\n" , deviceProp.maxThreadsDim[0]);
printf(" Max threads in y dimension %d\n" , deviceProp.maxThreadsDim[1]);
printf(" Max threads in z dimension %d\n" , deviceProp.maxThreadsDim[2]);
printf(" Where x*y*z <= 1024\n" );
printf(" Max grid size x (block) %d\n" , deviceProp.maxGridSize[0]);
printf(" Max grid size y (block) %d\n" , deviceProp.maxGridSize[1]);
printf(" Max grid size z (block) %d\n" , deviceProp.maxGridSize[2]);
printf(" Clock rate (kHZ) %d\n" , deviceProp.clockRate);
printf(" Total constant memory (bytes) %lu\n" , deviceProp.totalConstMem);
printf(" Major %d\n" , deviceProp.major);
printf(" Minor %d\n" , deviceProp.minor);
printf(" Multiprocessor count %d\n" , deviceProp.multiProcessorCount);
printf(" Run time limit for kernel execution %d\n" , deviceProp.kernelExecTimeoutEnabled );
printf(" Integrated GPU (motherboard) %d\n" , deviceProp.integrated);
printf(" Mapping host mem into CUDA adress space %d\n" , deviceProp.canMapHostMemory);
printf(" Compute mode %d\n" , deviceProp.computeMode);
printf(" Memory Clock Rate (KHz): %d\n" , deviceProp.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n" , deviceProp.memoryBusWidth);
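    // Peak bandwidth (GB/s) = 2 (double data rate) * memoryClockRate [kHz] * (busWidth / 8) [bytes] / 1e6.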
printf(" Peak Memory Bandwidth (GB/s): %f\n" , 2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1.0e6); //Double rate memory explain the x2
printf(" L2 cache size (bytes) %d\n" , deviceProp.l2CacheSize);
printf(" Max resident threads per multiprocessor %d\n" , deviceProp.maxThreadsPerMultiProcessor);
printf(" Supports caching globals in L1 %d\n" , deviceProp.globalL1CacheSupported);
printf(" Supports caching locals in L1 %d\n" , deviceProp.localL1CacheSupported);
printf(" Shared memory per MP (bytes) %lu\n" , deviceProp.sharedMemPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
printf(" 32-bits registers per MP %d\n" , deviceProp.regsPerMultiprocessor); //Shared by all threads blocks simultaneously resident on a multiprocessor
}
}
int main(int argc, char *argv[]){
deviceInfo();
return 0;
}
|
3c163aeedd8f1d5bef8983d2af536e2a8a05c8c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %run_test hipify "%s" "%t" %cuda_args
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also illustrates how to
// introduce dependencies between CUDA streams with the new hipStreamWaitEvent function introduced
// in CUDA 3.2.
//
// Devices of compute capability 1.x will run the kernels one after another
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <stdio.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
__global__ void sum(clock_t *d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x; i < N; i+= blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
    __syncthreads();
for (int i=16; i>0; i/=2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
        __syncthreads();
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv)
{
int nkernels = 8; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels"))
{
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// CHECK: hipDeviceProp_t deviceProp;
hipDeviceProp_t deviceProp;
// CHECK: checkCudaErrors(hipGetDevice(&cuda_device));
checkCudaErrors(hipGetDevice(&cuda_device));
// CHECK: checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
if ((deviceProp.concurrentKernels == 0))
{
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
// CHECK: checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
// CHECK: checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
// CHECK: hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
// allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++)
{
// CHECK: checkCudaErrors(hipStreamCreate(&(streams[i])));
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
// CHECK: hipEvent_t start_event, stop_event;
// create CUDA event handles
hipEvent_t start_event, stop_event;
// CHECK: checkCudaErrors(hipEventCreate(&start_event));
// CHECK: checkCudaErrors(hipEventCreate(&stop_event));
checkCudaErrors(hipEventCreate(&start_event));
checkCudaErrors(hipEventCreate(&stop_event));
// the events are used for synchronization only and hence do not need to record timings
// this also makes events not introduce global sync points when recorded which is critical to get overlap
// CHECK: hipEvent_t *kernelEvent;
// CHECK: kernelEvent = (hipEvent_t *) malloc(nkernels * sizeof(hipEvent_t));
hipEvent_t *kernelEvent;
kernelEvent = (hipEvent_t *) malloc(nkernels * sizeof(hipEvent_t));
for (int i = 0; i < nkernels; i++)
{
// CHECK: checkCudaErrors(hipEventCreateWithFlags(&(kernelEvent[i]), hipEventDisableTiming));
checkCudaErrors(hipEventCreateWithFlags(&(kernelEvent[i]), hipEventDisableTiming));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 1000));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
// CHECK: hipEventRecord(start_event, 0);
hipEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for (int i=0; i<nkernels; ++i)
{
// CHECK: hipLaunchKernelGGL(clock_block, dim3(1), dim3(1), 0, streams[i], &d_a[i], time_clocks);
hipLaunchKernelGGL(( clock_block), dim3(1),dim3(1),0,streams[i], &d_a[i], time_clocks);
total_clocks += time_clocks;
// CHECK: checkCudaErrors(hipEventRecord(kernelEvent[i], streams[i]));
checkCudaErrors(hipEventRecord(kernelEvent[i], streams[i]));
// make the last stream wait for the kernel event to be recorded
// CHECK: checkCudaErrors(hipStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0));
checkCudaErrors(hipStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events have been recorded
// CHECK: hipLaunchKernelGGL(sum, dim3(1), dim3(32), 0, streams[nstreams-1], d_a, nkernels);
// CHECK: checkCudaErrors(hipMemcpyAsync(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost, streams[nstreams-1]));
hipLaunchKernelGGL(( sum), dim3(1),dim3(32),0,streams[nstreams-1], d_a, nkernels);
checkCudaErrors(hipMemcpyAsync(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost, streams[nstreams-1]));
// at this point the CPU has dispatched all work for the GPU and can continue processing other tasks in parallel
// in this sample we just wait until the GPU is done
// CHECK: checkCudaErrors(hipEventRecord(stop_event, 0));
// CHECK: checkCudaErrors(hipEventSynchronize(stop_event));
// CHECK: checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels, nkernels * kernel_time/1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n", nkernels, kernel_time/1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time/1000.0f);
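    // Each clock_block kernel spins for at least time_clocks, so the summed
    // per-kernel clock counts in a[0] should exceed total_clocks if every
    // kernel ran to completion.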
bool bTestResult = (a[0] > total_clocks);
// release resources
for (int i = 0; i < nkernels; i++)
{
// CHECK: hipStreamDestroy(streams[i]);
// CHECK: hipEventDestroy(kernelEvent[i]);
hipStreamDestroy(streams[i]);
hipEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
// CHECK: hipEventDestroy(start_event);
// CHECK: hipEventDestroy(stop_event);
// CHECK: hipHostFree(a);
// CHECK: hipFree(d_a);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
hipHostFree(a);
hipFree(d_a);
if (!bTestResult)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 3c163aeedd8f1d5bef8983d2af536e2a8a05c8c9.cu | // RUN: %run_test hipify "%s" "%t" %cuda_args
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also illustrates how to
// introduce dependencies between CUDA streams with the new cudaStreamWaitEvent function introduced
// in CUDA 3.2.
//
// Devices of compute capability 1.x will run the kernels one after another
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <stdio.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
__global__ void sum(clock_t *d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x; i < N; i+= blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
    __syncthreads();
for (int i=16; i>0; i/=2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
        __syncthreads();
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv)
{
int nkernels = 8; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels"))
{
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
// CHECK: hipDeviceProp_t deviceProp;
cudaDeviceProp deviceProp;
// CHECK: checkCudaErrors(hipGetDevice(&cuda_device));
checkCudaErrors(cudaGetDevice(&cuda_device));
// CHECK: checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
if ((deviceProp.concurrentKernels == 0))
{
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
// CHECK: checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
// CHECK: checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
// CHECK: hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
// allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++)
{
// CHECK: checkCudaErrors(hipStreamCreate(&(streams[i])));
checkCudaErrors(cudaStreamCreate(&(streams[i])));
}
// CHECK: hipEvent_t start_event, stop_event;
// create CUDA event handles
cudaEvent_t start_event, stop_event;
// CHECK: checkCudaErrors(hipEventCreate(&start_event));
// CHECK: checkCudaErrors(hipEventCreate(&stop_event));
checkCudaErrors(cudaEventCreate(&start_event));
checkCudaErrors(cudaEventCreate(&stop_event));
// the events are used for synchronization only and hence do not need to record timings
// this also makes events not introduce global sync points when recorded which is critical to get overlap
// CHECK: hipEvent_t *kernelEvent;
// CHECK: kernelEvent = (hipEvent_t *) malloc(nkernels * sizeof(hipEvent_t));
cudaEvent_t *kernelEvent;
kernelEvent = (cudaEvent_t *) malloc(nkernels * sizeof(cudaEvent_t));
for (int i = 0; i < nkernels; i++)
{
// CHECK: checkCudaErrors(hipEventCreateWithFlags(&(kernelEvent[i]), hipEventDisableTiming));
checkCudaErrors(cudaEventCreateWithFlags(&(kernelEvent[i]), cudaEventDisableTiming));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 1000));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
// CHECK: hipEventRecord(start_event, 0);
cudaEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for (int i=0; i<nkernels; ++i)
{
// CHECK: hipLaunchKernelGGL(clock_block, dim3(1), dim3(1), 0, streams[i], &d_a[i], time_clocks);
clock_block<<<1,1,0,streams[i]>>>(&d_a[i], time_clocks);
total_clocks += time_clocks;
// CHECK: checkCudaErrors(hipEventRecord(kernelEvent[i], streams[i]));
checkCudaErrors(cudaEventRecord(kernelEvent[i], streams[i]));
// make the last stream wait for the kernel event to be recorded
// CHECK: checkCudaErrors(hipStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0));
checkCudaErrors(cudaStreamWaitEvent(streams[nstreams-1], kernelEvent[i],0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events have been recorded
// CHECK: hipLaunchKernelGGL(sum, dim3(1), dim3(32), 0, streams[nstreams-1], d_a, nkernels);
// CHECK: checkCudaErrors(hipMemcpyAsync(a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost, streams[nstreams-1]));
sum<<<1,32,0,streams[nstreams-1]>>>(d_a, nkernels);
checkCudaErrors(cudaMemcpyAsync(a, d_a, sizeof(clock_t), cudaMemcpyDeviceToHost, streams[nstreams-1]));
// at this point the CPU has dispatched all work for the GPU and can continue processing other tasks in parallel
// in this sample we just wait until the GPU is done
// CHECK: checkCudaErrors(hipEventRecord(stop_event, 0));
// CHECK: checkCudaErrors(hipEventSynchronize(stop_event));
// CHECK: checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels, nkernels * kernel_time/1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n", nkernels, kernel_time/1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time/1000.0f);
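    // Each clock_block kernel spins for at least time_clocks, so the summed
    // per-kernel clock counts in a[0] should exceed total_clocks if every
    // kernel ran to completion.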
bool bTestResult = (a[0] > total_clocks);
// release resources
for (int i = 0; i < nkernels; i++)
{
// CHECK: hipStreamDestroy(streams[i]);
// CHECK: hipEventDestroy(kernelEvent[i]);
cudaStreamDestroy(streams[i]);
cudaEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
// CHECK: hipEventDestroy(start_event);
// CHECK: hipEventDestroy(stop_event);
// CHECK: hipHostFree(a);
// CHECK: hipFree(d_a);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFreeHost(a);
cudaFree(d_a);
if (!bTestResult)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
43a710e72f5b12f32845d56f32e318c9e280ba96.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CudaAnnealing.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
#include "Matrix.h"
#include "Spinset.h"
#include "CudaAnnealing.h"
#include <hip/hip_runtime.h>
#include <sstream>
#include <math.h>
void checkError(hipError_t err, string arg = "") {
if (err != hipSuccess) {
cout << "Error: " << hipGetErrorString(err) << endl;
if (arg != "")
cout << "Additional data: " << arg << endl;
std::exit(-1);
}
}
CudaAnnealing::CudaAnnealing(Matrix _matrix, int _blockCount, float _minDiff) {
minDiff = _minDiff;
// Set pointers to null
devSpins = NULL;
devMat = NULL;
devUnemptyMat = NULL;
meanFieldMembers = NULL;
hamiltonianMembers = NULL;
continueIteration = NULL;
devTemp = NULL;
size = _matrix.getSize();
blockSize = 512;
blockCount = _blockCount;
hipDeviceProp_t deviceProp;
checkError(hipGetDeviceProperties(&deviceProp, 0), "getProp");
blockSize = deviceProp.maxThreadsPerBlock;
// Allocate memory for pointers at GPU
checkError(
hipMalloc((void**) &meanFieldMembers,
sizeof(float) * size * blockCount), "malloc");
hipMalloc((void**) &devMat, sizeof(float) * size * size);
hipMalloc((void**) &devSpins, sizeof(float) * size * blockCount);
hipMalloc((void**) &devUnemptyMat, sizeof(int) * size * (size + 1));
hipMalloc((void**) &hamiltonianMembers, sizeof(double) * size * size);
hipMalloc((void**) &devTemp, sizeof(float) * blockCount);
hipMalloc((void**) &continueIteration, sizeof(bool) * _blockCount);
// Copy model data to GPU memory
checkError(
hipMemcpy(devMat, _matrix.getArray(), sizeof(float) * size * size,
hipMemcpyHostToDevice), "memcpy mat to host");
hipMemcpy(devUnemptyMat, _matrix.getUnemptyMat(),
sizeof(int) * size * (size + 1), hipMemcpyHostToDevice);
}
void CudaAnnealing::loadSet(Spinset set, int setIndex) {
checkError(
hipMemcpy(&devSpins[setIndex * size], set.getArray(),
sizeof(float) * size, hipMemcpyHostToDevice),
"memcpy spinset to device");
hipMemcpy(&devTemp[setIndex], &(set.temp), sizeof(float),
hipMemcpyHostToDevice);
}
void CudaAnnealing::freeAllocatedMemory() {
// Free GPU memory
hipFree(devSpins);
hipFree(devMat);
hipFree(meanFieldMembers);
hipFree(devTemp);
hipFree(devUnemptyMat);
hipFree(hamiltonianMembers);
hipFree(continueIteration);
}
__global__ void allocateHamiltonianMembers(float* devMat, float* devSpins,
int setIndex, int size, double* hamiltonianMembers) {
// Hamiltonian member assignment
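	// Convention used here: diagonal matrix entries contribute linearly in the
	// spin (an on-site, local-field-like term), while off-diagonal entries give
	// the pairwise coupling s_i * s_j * M_ij.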
int i, j;
int wIndex = threadIdx.x + blockIdx.x * blockDim.x;
while (wIndex < size * size) {
i = wIndex % size;
j = (int) (wIndex / size);
if (i == j)
hamiltonianMembers[wIndex] = devSpins[i + setIndex * size]
* devMat[wIndex];
else
hamiltonianMembers[wIndex] = (double) (devSpins[i + setIndex * size]
* devSpins[j + setIndex * size] * devMat[wIndex]);
wIndex = wIndex + blockDim.x * gridDim.x;
}
}
__global__ void quickSum(double* members, int size) {
// Sum up numbers in specified range within specified pointer
	// In the end the sum will be accessible directly from the pointer
long long offset = 1;
int wIndex;
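	// Pairwise tree reduction: each pass adds members[(2k+1)*offset] into
	// members[2k*offset] and doubles the stride, so after about log2(size)
	// passes the full sum sits in members[0].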
while (offset < size) {
wIndex = threadIdx.x;
while ((wIndex * 2 + 1) * offset < size) {
members[wIndex * 2 * offset] += members[(wIndex * 2 + 1) * offset];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
}
double CudaAnnealing::extractHamiltonian(int index) { // Get hamiltonian from set with index
hipLaunchKernelGGL(( allocateHamiltonianMembers), dim3(blockCount), dim3(blockSize), 0, 0, devMat, devSpins, index, size,
hamiltonianMembers);
hipLaunchKernelGGL(( quickSum), dim3(1), dim3(blockSize), 0, 0, hamiltonianMembers, size * size);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
checkError(err, "Kernel at extractEnergy");
double out;
checkError(
hipMemcpy(&out, hamiltonianMembers, sizeof(double),
hipMemcpyDeviceToHost), "memcpy energy to host");
return out;
}
Spinset CudaAnnealing::extractSet(int index) { // Get spins from set with index
float* hSpins = (float*) malloc(sizeof(float) * size);
checkError(
hipMemcpy(hSpins, &devSpins[index * size], sizeof(float) * size,
hipMemcpyDeviceToHost), "memcpy spins to host");
Spinset outSpins(size);
for (int i = 0; i < size; i++)
outSpins.SetSpin(i, hSpins[i]);
return outSpins;
}
__device__ float meanFieldMember(const float *mat, const float *set,
int spinIndex, int i, int size) { // Returns /Phi_ind
if (i != spinIndex)
return (mat[spinIndex * size + i] + mat[i * size + spinIndex]) * set[i];
else
return mat[spinIndex * size + i];
}
__global__ void cudaKernelAnneal(float* mat, float* spins, int size,
float* temp, float tempStep, float* meanFieldMembers,
bool* proceedFlags, float proceedThreshold, int* unemptyCells,
float linearCoef) {
int blockId = blockIdx.x, thrId = threadIdx.x;
do {
// Decrease temperature
if (thrId == 0)
temp[blockId] = temp[blockId] - tempStep;
// Stabilize
do {
__syncthreads();
// Resetting flags
if (thrId == 0)
proceedFlags[blockId] = false;
for (int spinId = 0; spinId < size; ++spinId) { // Anneal every spin
__syncthreads();
// Mean-field member assignment
int wIndex = thrId;
while (wIndex < unemptyCells[spinId * (size + 1)]) {
meanFieldMembers[wIndex + blockId * size] = meanFieldMember(
mat, spins + blockId * size, spinId,
unemptyCells[spinId * (size + 1) + wIndex + 1],
size);
wIndex = wIndex + blockDim.x;
}
__syncthreads();
// Parallelized mean-field computation
long long offset = 1;
while (offset < unemptyCells[spinId * (size + 1)]) {
wIndex = thrId;
while ((wIndex * 2 + 1) * offset
< unemptyCells[spinId * (size + 1)]) {
meanFieldMembers[wIndex * 2 * offset + blockId * size] +=
meanFieldMembers[(wIndex * 2 + 1) * offset
+ blockId * size];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
__syncthreads();
// Mean-field calculation complete - write new spin and delta
if (thrId == 0) {
float meanField = meanFieldMembers[blockId * size], old = spins[spinId + blockId * size];
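				// Mean-field annealing update: relax the spin toward -tanh(meanField / T),
				// mixing with the old value via linearCoef; at T <= 0 the spin snaps to
				// -sign(meanField), the zero-temperature limit of the same rule.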
if (temp[blockId] > 0) {
spins[spinId + blockId * size] = -1
* tanh(meanField / temp[blockId]) * linearCoef
+ spins[spinId + blockId * size]
* (1 - linearCoef);
} else if (meanField > 0)
spins[spinId + blockId * size] = -1;
else
spins[spinId + blockId * size] = 1;
if (proceedThreshold
< fabs(old - spins[spinId + blockId * size]))
proceedFlags[blockId] = true; // Too big delta. One more iteration needed
}
__syncthreads();
}
} while (proceedFlags[blockId]);
} while (temp[blockId] >= 0);
}
void CudaAnnealing::anneal(float pStep, float linearCoef) {
hipLaunchKernelGGL(( cudaKernelAnneal), dim3(blockCount), dim3(blockSize), 0, 0, devMat, devSpins, size, devTemp,
pStep, meanFieldMembers, continueIteration, minDiff, devUnemptyMat, linearCoef);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
checkError(err, "Kernel at cudaPull");
}
| 43a710e72f5b12f32845d56f32e318c9e280ba96.cu | /*
* CudaAnnealing.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
#include "Matrix.h"
#include "Spinset.h"
#include "CudaAnnealing.h"
#include <cuda_runtime.h>
#include <sstream>
#include <math.h>
void checkError(cudaError_t err, string arg = "") {
if (err != cudaSuccess) {
cout << "Error: " << cudaGetErrorString(err) << endl;
if (arg != "")
cout << "Additional data: " << arg << endl;
std::exit(-1);
}
}
CudaAnnealing::CudaAnnealing(Matrix _matrix, int _blockCount, float _minDiff) {
minDiff = _minDiff;
// Set pointers to null
devSpins = NULL;
devMat = NULL;
devUnemptyMat = NULL;
meanFieldMembers = NULL;
hamiltonianMembers = NULL;
continueIteration = NULL;
devTemp = NULL;
size = _matrix.getSize();
blockSize = 512;
blockCount = _blockCount;
cudaDeviceProp deviceProp;
checkError(cudaGetDeviceProperties(&deviceProp, 0), "getProp");
blockSize = deviceProp.maxThreadsPerBlock;
// Allocate memory for pointers at GPU
checkError(
cudaMalloc((void**) &meanFieldMembers,
sizeof(float) * size * blockCount), "malloc");
cudaMalloc((void**) &devMat, sizeof(float) * size * size);
cudaMalloc((void**) &devSpins, sizeof(float) * size * blockCount);
cudaMalloc((void**) &devUnemptyMat, sizeof(int) * size * (size + 1));
cudaMalloc((void**) &hamiltonianMembers, sizeof(double) * size * size);
cudaMalloc((void**) &devTemp, sizeof(float) * blockCount);
cudaMalloc((void**) &continueIteration, sizeof(bool) * _blockCount);
// Copy model data to GPU memory
checkError(
cudaMemcpy(devMat, _matrix.getArray(), sizeof(float) * size * size,
cudaMemcpyHostToDevice), "memcpy mat to host");
cudaMemcpy(devUnemptyMat, _matrix.getUnemptyMat(),
sizeof(int) * size * (size + 1), cudaMemcpyHostToDevice);
}
void CudaAnnealing::loadSet(Spinset set, int setIndex) {
checkError(
cudaMemcpy(&devSpins[setIndex * size], set.getArray(),
sizeof(float) * size, cudaMemcpyHostToDevice),
"memcpy spinset to device");
cudaMemcpy(&devTemp[setIndex], &(set.temp), sizeof(float),
cudaMemcpyHostToDevice);
}
void CudaAnnealing::freeAllocatedMemory() {
// Free GPU memory
cudaFree(devSpins);
cudaFree(devMat);
cudaFree(meanFieldMembers);
cudaFree(devTemp);
cudaFree(devUnemptyMat);
cudaFree(hamiltonianMembers);
cudaFree(continueIteration);
}
__global__ void allocateHamiltonianMembers(float* devMat, float* devSpins,
int setIndex, int size, double* hamiltonianMembers) {
// Hamiltonian member assignment
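	// Convention used here: diagonal matrix entries contribute linearly in the
	// spin (an on-site, local-field-like term), while off-diagonal entries give
	// the pairwise coupling s_i * s_j * M_ij.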
int i, j;
int wIndex = threadIdx.x + blockIdx.x * blockDim.x;
while (wIndex < size * size) {
i = wIndex % size;
j = (int) (wIndex / size);
if (i == j)
hamiltonianMembers[wIndex] = devSpins[i + setIndex * size]
* devMat[wIndex];
else
hamiltonianMembers[wIndex] = (double) (devSpins[i + setIndex * size]
* devSpins[j + setIndex * size] * devMat[wIndex]);
wIndex = wIndex + blockDim.x * gridDim.x;
}
}
__global__ void quickSum(double* members, int size) {
// Sum up numbers in specified range within specified pointer
	// In the end the sum will be accessible directly from the pointer
long long offset = 1;
int wIndex;
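	// Pairwise tree reduction: each pass adds members[(2k+1)*offset] into
	// members[2k*offset] and doubles the stride, so after about log2(size)
	// passes the full sum sits in members[0].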
while (offset < size) {
wIndex = threadIdx.x;
while ((wIndex * 2 + 1) * offset < size) {
members[wIndex * 2 * offset] += members[(wIndex * 2 + 1) * offset];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
}
double CudaAnnealing::extractHamiltonian(int index) { // Get hamiltonian from set with index
allocateHamiltonianMembers<<<blockCount, blockSize>>>(devMat, devSpins, index, size,
hamiltonianMembers);
quickSum<<<1, blockSize>>>(hamiltonianMembers, size * size);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
checkError(err, "Kernel at extractEnergy");
double out;
checkError(
cudaMemcpy(&out, hamiltonianMembers, sizeof(double),
cudaMemcpyDeviceToHost), "memcpy energy to host");
return out;
}
Spinset CudaAnnealing::extractSet(int index) { // Get spins from set with index
float* hSpins = (float*) malloc(sizeof(float) * size);
checkError(
cudaMemcpy(hSpins, &devSpins[index * size], sizeof(float) * size,
cudaMemcpyDeviceToHost), "memcpy spins to host");
Spinset outSpins(size);
for (int i = 0; i < size; i++)
outSpins.SetSpin(i, hSpins[i]);
return outSpins;
}
__device__ float meanFieldMember(const float *mat, const float *set,
int spinIndex, int i, int size) { // Returns /Phi_ind
if (i != spinIndex)
return (mat[spinIndex * size + i] + mat[i * size + spinIndex]) * set[i];
else
return mat[spinIndex * size + i];
}
__global__ void cudaKernelAnneal(float* mat, float* spins, int size,
float* temp, float tempStep, float* meanFieldMembers,
bool* proceedFlags, float proceedThreshold, int* unemptyCells,
float linearCoef) {
int blockId = blockIdx.x, thrId = threadIdx.x;
do {
// Decrease temperature
if (thrId == 0)
temp[blockId] = temp[blockId] - tempStep;
// Stabilize
do {
__syncthreads();
// Resetting flags
if (thrId == 0)
proceedFlags[blockId] = false;
for (int spinId = 0; spinId < size; ++spinId) { // Anneal every spin
__syncthreads();
// Mean-field member assignment
int wIndex = thrId;
while (wIndex < unemptyCells[spinId * (size + 1)]) {
meanFieldMembers[wIndex + blockId * size] = meanFieldMember(
mat, spins + blockId * size, spinId,
unemptyCells[spinId * (size + 1) + wIndex + 1],
size);
wIndex = wIndex + blockDim.x;
}
__syncthreads();
// Parallelized mean-field computation
long long offset = 1;
while (offset < unemptyCells[spinId * (size + 1)]) {
wIndex = thrId;
while ((wIndex * 2 + 1) * offset
< unemptyCells[spinId * (size + 1)]) {
meanFieldMembers[wIndex * 2 * offset + blockId * size] +=
meanFieldMembers[(wIndex * 2 + 1) * offset
+ blockId * size];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
__syncthreads();
// Mean-field calculation complete - write new spin and delta
if (thrId == 0) {
float meanField = meanFieldMembers[blockId * size], old = spins[spinId + blockId * size];
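				// Mean-field annealing update: relax the spin toward -tanh(meanField / T),
				// mixing with the old value via linearCoef; at T <= 0 the spin snaps to
				// -sign(meanField), the zero-temperature limit of the same rule.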
if (temp[blockId] > 0) {
spins[spinId + blockId * size] = -1
* tanh(meanField / temp[blockId]) * linearCoef
+ spins[spinId + blockId * size]
* (1 - linearCoef);
} else if (meanField > 0)
spins[spinId + blockId * size] = -1;
else
spins[spinId + blockId * size] = 1;
if (proceedThreshold
< fabs(old - spins[spinId + blockId * size]))
proceedFlags[blockId] = true; // Too big delta. One more iteration needed
}
__syncthreads();
}
} while (proceedFlags[blockId]);
} while (temp[blockId] >= 0);
}
void CudaAnnealing::anneal(float pStep, float linearCoef) {
cudaKernelAnneal<<<blockCount, blockSize>>>(devMat, devSpins, size, devTemp,
pStep, meanFieldMembers, continueIteration, minDiff, devUnemptyMat, linearCoef);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
checkError(err, "Kernel at cudaPull");
}
|
fa70566cf1356fcabb1f2ad2740b3769f96e9bf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.

// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple

#include <stdio.h>

const int N = 16;
const int blocksize = 16;

__global__
void simple(float *c)
{
	c[threadIdx.x] = threadIdx.x;
}

int main()
{
	float *c = new float[N];
	float *cd;
	const int size = N*sizeof(float);

	hipMalloc( (void**)&cd, size );
	dim3 dimBlock( blocksize, 1 );
	dim3 dimGrid( 1, 1 );
	hipLaunchKernelGGL(( simple), dim3(dimGrid), dim3(dimBlock), 0, 0, cd);
	hipDeviceSynchronize();
	hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
	hipFree( cd );

	for (int i = 0; i < N; i++)
		printf("%f ", c[i]);
	printf("\n");
	delete[] c;
	printf("done\n");
	return EXIT_SUCCESS;
}
| fa70566cf1356fcabb1f2ad2740b3769f96e9bf4.cu | // Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.

// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple

#include <stdio.h>

const int N = 16;
const int blocksize = 16;

__global__
void simple(float *c)
{
	c[threadIdx.x] = threadIdx.x;
}

int main()
{
	float *c = new float[N];
	float *cd;
	const int size = N*sizeof(float);

	cudaMalloc( (void**)&cd, size );
	dim3 dimBlock( blocksize, 1 );
	dim3 dimGrid( 1, 1 );
	simple<<<dimGrid, dimBlock>>>(cd);
	cudaThreadSynchronize();
	cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
	cudaFree( cd );

	for (int i = 0; i < N; i++)
		printf("%f ", c[i]);
	printf("\n");
	delete[] c;
	printf("done\n");
	return EXIT_SUCCESS;
}
37 }
|
9c986807e37d8b46618878058e7fa7aa01600c37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 512
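/* With in[i] = i, summing the 2*RADIUS+1 neighbours of a linear ramp gives
   out[i] = i * (2*RADIUS + 1), which is the value main() checks against. */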
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
/* insert code to calculate global index in the array using block
and thread built-in variables */
  int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
/* add the required elements from the array "in" to the temporary
variable "result */
    result += in[i];
}
out[globalIndex] = result;
return;
}
int main()
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( (void **) &d_in, size ) );
checkCUDA( hipMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, in, size, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
/* insert code for proper number of blocks in X dimension */
  dim3 blocks( N / THREADS_PER_BLOCK, 1, 1);
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( stencil_1d), dim3(blocks), dim3(threads) , 0, 0, N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( hipMemcpy( out, d_out, size, hipMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_out ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| 9c986807e37d8b46618878058e7fa7aa01600c37.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 512
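/* With in[i] = i, summing the 2*RADIUS+1 neighbours of a linear ramp gives
   out[i] = i * (2*RADIUS + 1), which is the value main() checks against. */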
__global__ void stencil_1d(int n, double *in, double *out)
{
/* calculate global index in the array */
/* insert code to calculate global index in the array using block
and thread built-in variables */
  int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = globalIndex-(RADIUS); i <= globalIndex+(RADIUS); i++ )
{
/* add the required elements from the array "in" to the temporary
variable "result */
    result += in[i];
}
out[globalIndex] = result;
return;
}
int main()
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( (void **) &d_in, size ) );
checkCUDA( cudaMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = 0;
}
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, in, size, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
/* insert code for proper number of blocks in X dimension */
  dim3 blocks( N / THREADS_PER_BLOCK, 1, 1);
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
stencil_1d<<< blocks, threads >>>( N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( cudaMemcpy( out, d_out, size, cudaMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_out ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
6dcdef7d04ed656ec86f8dc16a1aa7a8af40b81e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* plasticity.cu
*
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "plasticity_kernel.cu"
#include "assist.h"
#define ERROR_CHECK { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
bool if_quiet = true;
unsigned int timer_compute = 0;
unsigned int timer_memory = 0;
int i;
char input_fn[1024];
char output_fn[1024];
data_type * deviceBetaP = NULL, *deviceSigma = NULL;
data_type * deviceFlux = NULL, *deviceVel = NULL;
int width = N, height = N;
int seed = 0;
CUT_DEVICE_INIT(argc, argv);
cutGetCmdLineArgumenti(argc, (const char **) argv, "seed", &seed);
#ifdef LENGTHSCALE
float lengthscale = 0.;
cutGetCmdLineArgumentf(argc, (const char **) argv, "lengthscale", &lengthscale);
printf("Lengthscale: %.2f\n", lengthscale);
#endif
#ifdef LOADING
printf("Loading\n");
#else
printf("Relaxing\n");
#endif
printf("Running seed: %d\n", seed);
if_quiet = true; // If not display matrix contents
//printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices.\n");
#ifdef DIMENSION3
printf(" N: %d x %d x %d x %d\n", N, N, N, NUM_COMP);
unsigned int size = N * N * N * NUM_COMP;
int breadth = N;
#else
printf(" N: %d x %d x %d\n", N, N, NUM_COMP);
unsigned int size = N * N * NUM_COMP;
int breadth = 1;
#endif
unsigned int mem_size = sizeof(data_type) * size;
data_type* hostBetaP = (data_type*) malloc(mem_size);
data_type* hostSigma = (data_type*) malloc(mem_size);
data_type* hostFlux = (data_type*) malloc(mem_size);
data_type* hostVel = (data_type*) malloc(mem_size);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
double time = 0.;
#ifndef LENGTHSCALE
sprintf(output_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RUN_DESC "_%d_" PRECISION_STR "_%d_L%d.plas", N, seed, lambda);
#else
sprintf(output_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RUN_DESC "_%d_" PRECISION_STR "_%d_L%d_l%.2f.plas", N, seed, lambda, lengthscale);
#endif
#ifdef CONTINUE_RUN
FILE *test_fp = fopen(output_fn, "rb");
if (test_fp != NULL) {
fclose(test_fp);
test_fp=NULL;
// Saved file exists
// Load previous state
data_type * matrix;
matrix = ReadMatrixFileFunc(output_fn, 1, breadth*height*width*NUM_COMP+1, 1, if_quiet);
time = (double)*matrix;
printf(" Restarting from t=%f\n", time);
matrix++;
for(i = 0; i < size; i++)
hostBetaP[i] = (data_type) matrix[i];
matrix--;
free(matrix);
} else
{
#endif
// Load from relaxed or initialized file for runs
#ifdef LOADING
data_type * matrix;
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RELAX_RUN_DESC "_%d_" PRECISION_STR "_%d_L%d.plas", N, seed, lambda);
matrix = ReadMatrixFileFunc(input_fn, width, breadth*height*NUM_COMP, 1, if_quiet);
#else
double * matrix;
//float * matrix;
#ifndef LENGTHSCALE
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "initial_%d_%d.mat", N, seed);
#else
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "initial_%d_%d_L%.2f.mat", N, seed, lengthscale);
#endif
matrix = ReadDoubleMatrixFile(input_fn, width, breadth*height*NUM_COMP, 0, if_quiet);
#endif
for(i = 0; i < size; i++)
hostBetaP[i] = (data_type) matrix[i];
free(matrix); matrix = NULL;
}
double timeInc = 0.01;
#ifdef LOADING
double endTime = 3.00/LOADING_RATE;
#else
double endTime = 20.00;
#endif
FILE *data_fp = OpenFile(output_fn,
#ifdef CONTINUE_RUN
"ab",
#else
"wb",
#endif
if_quiet);
#define XSTR(s) STR(s)
#define STR(s) #s
//FILE *data_fp = OpenFile("cudaload_"XSTR(N)"_dp_L%d.plas", "wb", if_quiet);
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
CUT_SAFE_CALL(cutCreateTimer(&timer_memory));
CUT_SAFE_CALL(cutStartTimer(timer_memory));
printf(" Allocate device memory.\n");
CUDA_SAFE_CALL(hipMalloc((void**) &deviceBetaP, mem_size));
setupSystem();
printf(" Copy host memory data to device.\n");
#ifdef DYNAMIC_NUCLEATION
CUDA_SAFE_CALL(hipMalloc((void**) &beta0dot, mem_size));
CUDA_SAFE_CALL(hipMemcpy(beta0dot, hostBetaP, mem_size, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemset(deviceBetaP, 0, mem_size));
{ d_dim_vector L;
L.x = width;
L.y = height;
#ifdef DIMENSION3
L.z = breadth;
#endif
data_type *sigma;
CUDA_SAFE_CALL(hipMalloc((void**) &sigma, mem_size));
calculateSigma(beta0dot, sigma, L);
hipDeviceSynchronize();
printf("max beta0 = %f\n", reduceMax(beta0dot, size));
double max = 0.0; for (int i=0; i<size; i++){ if (hostBetaP[i] > max) max = hostBetaP[i];}
printf("max host = %f\n", max);
double sigmax = reduceMax(sigma, size);
maxNucleationTimestep = 1.0/sqrt(fabs(sigmax));
printf("sigmax %f\n", sigmax);
printf("maxNucleationTimestep = %f\n", maxNucleationTimestep);
CUDA_SAFE_CALL(hipFree(sigma));
}
#else
CUDA_SAFE_CALL(hipMemcpy(deviceBetaP, hostBetaP, mem_size,
hipMemcpyHostToDevice));
#endif
printf(" Allocate device memory for results.\n");
// FIXME - These lines maybe allocating unused memory
/*
CUDA_SAFE_CALL(hipMalloc((void**) &deviceSigma, mem_size));
hipMemset(deviceSigma, 0, mem_size);
CUDA_SAFE_CALL(hipMalloc((void**) &deviceFlux, mem_size));
hipMemset(deviceFlux, 0, mem_size);
CUDA_SAFE_CALL(hipMalloc((void**) &deviceVel, mem_size));
hipMemset(deviceVel, 0, mem_size);
*/
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start the timer_compute to calculate how much time we spent on it.
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
d_dim_vector L;
L.x = width;
L.y = height;
#ifdef DIMENSION3
L.z = breadth;
#endif
// If this is the initial slice
#ifndef LAST_SHOT_ONLY
if (time==0.)
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#ifndef DEBUG_TIMESTEPS
while(time < endTime) {
double intermediateTime;
#ifdef LOADING
timeInc = 0.5;
#else
if (time<=0.1)
timeInc = 0.01;
else
if (time <= 1.0)
timeInc = 0.05;
else
if (time <= 5.0)
timeInc = 0.5;
else
timeInc = 1.0;
#endif
intermediateTime = time + timeInc;
while(time < intermediateTime) {
double timeStep = TVDstep(deviceBetaP, L, time, intermediateTime);
printf("%le +%le\n", time, timeStep);
time += timeStep;
}
hipDeviceSynchronize();
hipMemcpy(hostBetaP, deviceBetaP, mem_size, hipMemcpyDeviceToHost);
#ifndef LAST_SHOT_ONLY
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
}
#ifdef LAST_SHOT_ONLY
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#else
#ifndef SINGLE_STEP_DEBUG
int count = 0;
while(count++ < 10) {
double intermediateTime = time+1.0;
double timeStep = TVDstep(deviceBetaP, L, height, time, intermediateTime);
printf("dbg %le +%le\n", time, timeStep);
time += timeStep;
hipDeviceSynchronize();
hipMemcpy(hostBetaP, deviceBetaP, mem_size, hipMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
}
#else
#ifdef DIMENSION3
#error
#endif
dim3 grid(N/TILEX, N);
dim3 tids(TILEX, 3, 3);
data_type *sigma;
CUDA_SAFE_CALL(hipMalloc((void**) &sigma, sizeof(data_type)*breadth*width*height*NUM_SIG_COMP));
data_type *rhs;
CUDA_SAFE_CALL(hipMalloc((void**) &rhs, sizeof(data_type)*breadth*width*height*NUM_COMP));
data_type *velocity;
CUDA_SAFE_CALL(hipMalloc((void**) &velocity, sizeof(data_type)*breadth*width*height*NUM_COMP));
calculateSigma(deviceBetaP, sigma, width, height);
hipDeviceSynchronize();
hipMemcpy(hostBetaP, sigma, mem_size, hipMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
// calculate flux
hipLaunchKernelGGL(( centralHJ), dim3(grid), dim3(tids), 0, 0, deviceBetaP, sigma, rhs, velocity, L);
hipDeviceSynchronize();
hipMemcpy(hostBetaP, rhs, mem_size, hipMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
hipMemcpy(hostBetaP, velocity, mem_size/NUM_COMP, hipMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#endif
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
hipDeviceSynchronize();
fclose( data_fp );
// Stop the timer_compute
CUT_SAFE_CALL(cutStopTimer(timer_compute));
// check if kernel execution generated an error
ERROR_CHECK
CUT_CHECK_ERROR("Kernel execution failed");
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
CUT_SAFE_CALL(cutStartTimer(timer_memory));
//hipMemcpy(hostSigma, deviceSigma, mem_size, hipMemcpyDeviceToHost);
//hipMemcpy(hostFlux, deviceFlux, mem_size, hipMemcpyDeviceToHost);
//hipMemcpy(hostVel, deviceVel, mem_size, hipMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Show timing information
// ================================================
printf(" GPU memory access time: %f (ms)\n",
cutGetTimerValue(timer_memory));
printf(" GPU computation time : %f (ms)\n",
cutGetTimerValue(timer_compute));
printf(" GPU processing time : %f (ms)\n",
cutGetTimerValue(timer_compute) + cutGetTimerValue(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
//WriteMatrixFile("velocity.mat", hostVel, width, height, if_quiet);
//WriteMatrixFile("rhs.mat", hostFlux, width, NUM_COMP*height, if_quiet);
#if 0
for(i = 0; i < NUM_SIG_COMP; i++) {
for(int j = 0; j < height; j++) {
for(int k = 0; k < width; k++)
fprintf(stdout, "%lf ", hostSigma[(i*height+j)*width+k]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
}
#endif
// clean up memory
free(hostBetaP); free(hostSigma);
free(hostFlux); free(hostVel);
// ===================================================================
// Free the device memory
// ===================================================================
CUDA_SAFE_CALL(hipFree(deviceBetaP));
//CUDA_SAFE_CALL(hipFree(deviceSigma));
//CUDA_SAFE_CALL(hipFree(deviceFlux));
//CUDA_SAFE_CALL(hipFree(deviceVel));
}
| 6dcdef7d04ed656ec86f8dc16a1aa7a8af40b81e.cu | /* plasticity.cu
*
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include "plasticity_kernel.cu"
#include "assist.h"
#define ERROR_CHECK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char** argv)
{
bool if_quiet = true;
unsigned int timer_compute = 0;
unsigned int timer_memory = 0;
int i;
char input_fn[1024];
char output_fn[1024];
data_type * deviceBetaP = NULL, *deviceSigma = NULL;
data_type * deviceFlux = NULL, *deviceVel = NULL;
int width = N, height = N;
int seed = 0;
CUT_DEVICE_INIT(argc, argv);
cutGetCmdLineArgumenti(argc, (const char **) argv, "seed", &seed);
#ifdef LENGTHSCALE
float lengthscale = 0.;
cutGetCmdLineArgumentf(argc, (const char **) argv, "lengthscale", &lengthscale);
printf("Lengthscale: %.2f\n", lengthscale);
#endif
#ifdef LOADING
printf("Loading\n");
#else
printf("Relaxing\n");
#endif
printf("Running seed: %d\n", seed);
if_quiet = true; // If not display matrix contents
//printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices.\n");
#ifdef DIMENSION3
printf(" N: %d x %d x %d x %d\n", N, N, N, NUM_COMP);
unsigned int size = N * N * N * NUM_COMP;
int breadth = N;
#else
printf(" N: %d x %d x %d\n", N, N, NUM_COMP);
unsigned int size = N * N * NUM_COMP;
int breadth = 1;
#endif
unsigned int mem_size = sizeof(data_type) * size;
data_type* hostBetaP = (data_type*) malloc(mem_size);
data_type* hostSigma = (data_type*) malloc(mem_size);
data_type* hostFlux = (data_type*) malloc(mem_size);
data_type* hostVel = (data_type*) malloc(mem_size);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
double time = 0.;
#ifndef LENGTHSCALE
sprintf(output_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RUN_DESC "_%d_" PRECISION_STR "_%d_L%d.plas", N, seed, lambda);
#else
sprintf(output_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RUN_DESC "_%d_" PRECISION_STR "_%d_L%d_l%.2f.plas", N, seed, lambda, lengthscale);
#endif
#ifdef CONTINUE_RUN
FILE *test_fp = fopen(output_fn, "rb");
if (test_fp != NULL) {
fclose(test_fp);
test_fp=NULL;
// Saved file exists
// Load previous state
data_type * matrix;
matrix = ReadMatrixFileFunc(output_fn, 1, breadth*height*width*NUM_COMP+1, 1, if_quiet);
time = (double)*matrix;
printf(" Restarting from t=%f\n", time);
matrix++;
for(i = 0; i < size; i++)
hostBetaP[i] = (data_type) matrix[i];
matrix--;
free(matrix);
} else
{
#endif
// Load from relaxed or initialized file for runs
#ifdef LOADING
data_type * matrix;
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "cuda_" RELAX_RUN_DESC "_%d_" PRECISION_STR "_%d_L%d.plas", N, seed, lambda);
matrix = ReadMatrixFileFunc(input_fn, width, breadth*height*NUM_COMP, 1, if_quiet);
#else
double * matrix;
//float * matrix;
#ifndef LENGTHSCALE
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "initial_%d_%d.mat", N, seed);
#else
sprintf(input_fn, FILE_PREFIX FILE_PREFIX2 "initial_%d_%d_L%.2f.mat", N, seed, lengthscale);
#endif
matrix = ReadDoubleMatrixFile(input_fn, width, breadth*height*NUM_COMP, 0, if_quiet);
#endif
for(i = 0; i < size; i++)
hostBetaP[i] = (data_type) matrix[i];
free(matrix); matrix = NULL;
}
double timeInc = 0.01;
#ifdef LOADING
double endTime = 3.00/LOADING_RATE;
#else
double endTime = 20.00;
#endif
FILE *data_fp = OpenFile(output_fn,
#ifdef CONTINUE_RUN
"ab",
#else
"wb",
#endif
if_quiet);
#define XSTR(s) STR(s)
#define STR(s) #s
//FILE *data_fp = OpenFile("cudaload_"XSTR(N)"_dp_L%d.plas", "wb", if_quiet);
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
CUT_SAFE_CALL(cutCreateTimer(&timer_memory));
CUT_SAFE_CALL(cutStartTimer(timer_memory));
printf(" Allocate device memory.\n");
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceBetaP, mem_size));
setupSystem();
printf(" Copy host memory data to device.\n");
#ifdef DYNAMIC_NUCLEATION
CUDA_SAFE_CALL(cudaMalloc((void**) &beta0dot, mem_size));
CUDA_SAFE_CALL(cudaMemcpy(beta0dot, hostBetaP, mem_size, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemset(deviceBetaP, 0, mem_size));
{ d_dim_vector L;
L.x = width;
L.y = height;
#ifdef DIMENSION3
L.z = breadth;
#endif
data_type *sigma;
CUDA_SAFE_CALL(cudaMalloc((void**) &sigma, mem_size));
calculateSigma(beta0dot, sigma, L);
cudaThreadSynchronize();
printf("max beta0 = %f\n", reduceMax(beta0dot, size));
double max = 0.0; for (int i=0; i<size; i++){ if (hostBetaP[i] > max) max = hostBetaP[i];}
printf("max host = %f\n", max);
double sigmax = reduceMax(sigma, size);
maxNucleationTimestep = 1.0/sqrt(fabs(sigmax));
printf("sigmax %f\n", sigmax);
printf("maxNucleationTimestep = %f\n", maxNucleationTimestep);
CUDA_SAFE_CALL(cudaFree(sigma));
}
#else
CUDA_SAFE_CALL(cudaMemcpy(deviceBetaP, hostBetaP, mem_size,
cudaMemcpyHostToDevice));
#endif
printf(" Allocate device memory for results.\n");
// FIXME - These lines may be allocating unused memory
/*
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceSigma, mem_size));
cudaMemset(deviceSigma, 0, mem_size);
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceFlux, mem_size));
cudaMemset(deviceFlux, 0, mem_size);
CUDA_SAFE_CALL(cudaMalloc((void**) &deviceVel, mem_size));
cudaMemset(deviceVel, 0, mem_size);
*/
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start the timer_compute to calculate how much time we spent on it.
CUT_SAFE_CALL(cutCreateTimer(&timer_compute));
CUT_SAFE_CALL(cutStartTimer(timer_compute));
d_dim_vector L;
L.x = width;
L.y = height;
#ifdef DIMENSION3
L.z = breadth;
#endif
// If this is the initial slice
#ifndef LAST_SHOT_ONLY
if (time==0.)
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#ifndef DEBUG_TIMESTEPS
while(time < endTime) {
double intermediateTime;
#ifdef LOADING
timeInc = 0.5;
#else
if (time<=0.1)
timeInc = 0.01;
else
if (time <= 1.0)
timeInc = 0.05;
else
if (time <= 5.0)
timeInc = 0.5;
else
timeInc = 1.0;
#endif
intermediateTime = time + timeInc;
while(time < intermediateTime) {
double timeStep = TVDstep(deviceBetaP, L, time, intermediateTime);
printf("%le +%le\n", time, timeStep);
time += timeStep;
}
cudaThreadSynchronize();
cudaMemcpy(hostBetaP, deviceBetaP, mem_size, cudaMemcpyDeviceToHost);
#ifndef LAST_SHOT_ONLY
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
}
#ifdef LAST_SHOT_ONLY
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#else
#ifndef SINGLE_STEP_DEBUG
int count = 0;
while(count++ < 10) {
double intermediateTime = time+1.0;
double timeStep = TVDstep(deviceBetaP, L, height, time, intermediateTime);
printf("dbg %le +%le\n", time, timeStep);
time += timeStep;
cudaThreadSynchronize();
cudaMemcpy(hostBetaP, deviceBetaP, mem_size, cudaMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
}
#else
#ifdef DIMENSION3
#error
#endif
dim3 grid(N/TILEX, N);
dim3 tids(TILEX, 3, 3);
data_type *sigma;
CUDA_SAFE_CALL(cudaMalloc((void**) &sigma, sizeof(data_type)*breadth*width*height*NUM_SIG_COMP));
data_type *rhs;
CUDA_SAFE_CALL(cudaMalloc((void**) &rhs, sizeof(data_type)*breadth*width*height*NUM_COMP));
data_type *velocity;
CUDA_SAFE_CALL(cudaMalloc((void**) &velocity, sizeof(data_type)*breadth*width*height*NUM_COMP));
calculateSigma(deviceBetaP, sigma, width, height);
cudaThreadSynchronize();
cudaMemcpy(hostBetaP, sigma, mem_size, cudaMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
// calculate flux
centralHJ<<<grid, tids>>>(deviceBetaP, sigma, rhs, velocity, L);
cudaThreadSynchronize();
cudaMemcpy(hostBetaP, rhs, mem_size, cudaMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
cudaMemcpy(hostBetaP, velocity, mem_size/NUM_COMP, cudaMemcpyDeviceToHost);
ContinueWriteMatrix( data_fp, hostBetaP, time, width, breadth*height*NUM_COMP, if_quiet);
#endif
#endif
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
cudaThreadSynchronize();
fclose( data_fp );
// Stop the timer_compute
CUT_SAFE_CALL(cutStopTimer(timer_compute));
// check if kernel execution generated an error
ERROR_CHECK
CUT_CHECK_ERROR("Kernel execution failed");
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
CUT_SAFE_CALL(cutStartTimer(timer_memory));
//cudaMemcpy(hostSigma, deviceSigma, mem_size, cudaMemcpyDeviceToHost);
//cudaMemcpy(hostFlux, deviceFlux, mem_size, cudaMemcpyDeviceToHost);
//cudaMemcpy(hostVel, deviceVel, mem_size, cudaMemcpyDeviceToHost);
CUT_SAFE_CALL(cutStopTimer(timer_memory));
// ================================================
// Show timing information
// ================================================
printf(" GPU memory access time: %f (ms)\n",
cutGetTimerValue(timer_memory));
printf(" GPU computation time : %f (ms)\n",
cutGetTimerValue(timer_compute));
printf(" GPU processing time : %f (ms)\n",
cutGetTimerValue(timer_compute) + cutGetTimerValue(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_memory));
CUT_SAFE_CALL(cutDeleteTimer(timer_compute));
//WriteMatrixFile("velocity.mat", hostVel, width, height, if_quiet);
//WriteMatrixFile("rhs.mat", hostFlux, width, NUM_COMP*height, if_quiet);
#if 0
for(i = 0; i < NUM_SIG_COMP; i++) {
for(int j = 0; j < height; j++) {
for(int k = 0; k < width; k++)
fprintf(stdout, "%lf ", hostSigma[(i*height+j)*width+k]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
}
#endif
// clean up memory
free(hostBetaP); free(hostSigma);
free(hostFlux); free(hostVel);
// ===================================================================
// Free the device memory
// ===================================================================
CUDA_SAFE_CALL(cudaFree(deviceBetaP));
//CUDA_SAFE_CALL(cudaFree(deviceSigma));
//CUDA_SAFE_CALL(cudaFree(deviceFlux));
//CUDA_SAFE_CALL(cudaFree(deviceVel));
}
|
ca8c5d54455ec20e3c2ab6be94d30041c2d72e98.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
* This example demonstrates P2P ping-ponging of data from one GPU to another,
* within the same node. By enabling peer-to-peer transfers, you ensure that
* copies between GPUs go directly over the PCIe bus. If P2P is not enabled,
* host memory must be used as a staging area for GPU-to-GPU cudaMemcpys.
*/
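/*
 * Editor's sketch (hedged; not part of the original sample): if P2P were not
 * available, a GPU-to-GPU transfer would have to be staged through host memory.
 * `h_stage` is a hypothetical pinned buffer of at least `nbytes` bytes; the CHECK
 * macro comes from ../common/common.h, as used elsewhere in this file.
 */
static inline void stagedCopyThroughHost(float *dst_on_gpu1, const float *src_on_gpu0,
                                         float *h_stage, size_t nbytes)
{
    // Device 0 -> host staging buffer -> device 1, as two blocking copies.
    CHECK(hipMemcpy(h_stage, src_on_gpu0, nbytes, hipMemcpyDeviceToHost));
    CHECK(hipMemcpy(dst_on_gpu1, h_stage, nbytes, hipMemcpyHostToDevice));
}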
inline bool isCapableP2P(int ngpus)
{
//hipDeviceProp_t prop[ngpus];
hipDeviceProp_t *prop = (hipDeviceProp_t *)malloc(ngpus * sizeof(hipDeviceProp_t));
int iCount = 0;
for (int i = 0; i < ngpus; i++)
{
CHECK(hipGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2) iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n",
i, prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus)
{
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later)).
*/
inline void enableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(hipDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
}
else
{
printf("(%d, %d)\n", i, j );
}
}
}
}
inline void disableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(hipDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(hipDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)rand() / (float)RAND_MAX;
}
}
int main(int argc, char **argv)
{
int ngpus;
// check device count
CHECK(hipGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
isCapableP2P(ngpus);
// get ngpus from command line
if (argc > 1)
{
if (atoi(argv[1]) > ngpus)
{
fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
"than the total number of GPUs in this platform (%d)\n",
atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus > 2)
{
fprintf(stderr, "No more than 2 GPUs supported\n");
return 1;
}
if (ngpus > 1) enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
float **d_src = (float **)malloc(sizeof(float) * ngpus);
float **d_rcv = (float **)malloc(sizeof(float) * ngpus);
float **h_src = (float **)malloc(sizeof(float) * ngpus);
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);
// Create CUDA event handles
hipEvent_t start, stop;
CHECK(hipSetDevice(0));
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipMalloc(&d_src[i], iBytes));
CHECK(hipMalloc(&d_rcv[i], iBytes));
CHECK(hipHostMalloc((void **) &h_src[i], iBytes));
CHECK(hipStreamCreate(&stream[i]));
}
for (int i = 0; i < ngpus; i++)
{
initialData(h_src[i], iSize);
}
// unidirectional gmem copy
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
if (i % 2 == 0)
{
CHECK(hipMemcpy(d_src[1], d_src[0], iBytes, hipMemcpyDeviceToDevice));
}
else
{
CHECK(hipMemcpy(d_src[0], d_src[1], iBytes, hipMemcpyDeviceToDevice));
}
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsed_time_ms;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong unidirectional hipMemcpy:\t %8.2f ms ", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)iBytes / (elapsed_time_ms * 1e6f));
// bidirectional asynchronous gmem copy
CHECK(hipEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
CHECK(hipMemcpyAsync(d_src[1], d_src[0], iBytes, hipMemcpyDeviceToDevice, stream[0]));
CHECK(hipMemcpyAsync(d_rcv[0], d_rcv[1], iBytes, hipMemcpyDeviceToDevice, stream[1]));
}
CHECK(hipSetDevice(0));
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(hipEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong bidirectional hipMemcpyAsync:\t %8.2f ms ", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)2.0f * iBytes / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free
CHECK(hipSetDevice(0));
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(hipSetDevice(i));
CHECK(hipFree(d_src[i]));
CHECK(hipFree(d_rcv[i]));
CHECK(hipStreamDestroy(stream[i]));
CHECK(hipDeviceReset());
}
exit(EXIT_SUCCESS);
}
| ca8c5d54455ec20e3c2ab6be94d30041c2d72e98.cu | #include "../common/common.h"
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
/*
* This example demonstrates P2P ping-ponging of data from one GPU to another,
* within the same node. By enabling peer-to-peer transfers, you ensure that
* copies between GPUs go directly over the PCIe bus. If P2P is not enabled,
* host memory must be used as a staging area for GPU-to-GPU cudaMemcpys.
*/
inline bool isCapableP2P(int ngpus)
{
//cudaDeviceProp prop[ngpus];
cudaDeviceProp *prop = (cudaDeviceProp *)malloc(ngpus * sizeof(cudaDeviceProp));
int iCount = 0;
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaGetDeviceProperties(&prop[i], i));
if (prop[i].major >= 2) iCount++;
printf("> GPU%d: %s %s capable of Peer-to-Peer access\n",
i, prop[i].name, (prop[i].major >= 2 ? "is" : "not"));
}
if (iCount != ngpus)
{
printf("> no enough device to run this application\n");
}
return (iCount == ngpus);
}
/*
* enable P2P memcopies between GPUs (all GPUs must be compute capability 2.0 or
* later (Fermi or later)).
*/
inline void enableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(cudaDeviceEnablePeerAccess(j, 0));
printf("> GPU%d enabled direct access to GPU%d\n", i, j);
}
else
{
printf("(%d, %d)\n", i, j );
}
}
}
}
inline void disableP2P(int ngpus)
{
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
for (int j = 0; j < ngpus; j++)
{
if (i == j) continue;
int peer_access_available = 0;
CHECK(cudaDeviceCanAccessPeer(&peer_access_available, i, j));
if (peer_access_available)
{
CHECK(cudaDeviceDisablePeerAccess(j));
printf("> GPU%d disabled direct access to GPU%d\n", i, j);
}
}
}
}
void initialData(float *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip[i] = (float)rand() / (float)RAND_MAX;
}
}
int main(int argc, char **argv)
{
int ngpus;
// check device count
CHECK(cudaGetDeviceCount(&ngpus));
printf("> CUDA-capable device count: %i\n", ngpus);
// check p2p capability
isCapableP2P(ngpus);
// get ngpus from command line
if (argc > 1)
{
if (atoi(argv[1]) > ngpus)
{
fprintf(stderr, "Invalid number of GPUs specified: %d is greater "
"than the total number of GPUs in this platform (%d)\n",
atoi(argv[1]), ngpus);
return 1;
}
ngpus = atoi(argv[1]);
}
if (ngpus > 2)
{
fprintf(stderr, "No more than 2 GPUs supported\n");
return 1;
}
if (ngpus > 1) enableP2P(ngpus);
// Allocate buffers
int iSize = 1024 * 1024 * 16;
const size_t iBytes = iSize * sizeof(float);
printf("\nAllocating buffers (%iMB on each GPU and CPU Host)...\n",
int(iBytes / 1024 / 1024));
float **d_src = (float **)malloc(sizeof(float) * ngpus);
float **d_rcv = (float **)malloc(sizeof(float) * ngpus);
float **h_src = (float **)malloc(sizeof(float) * ngpus);
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);
// Create CUDA event handles
cudaEvent_t start, stop;
CHECK(cudaSetDevice(0));
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
CHECK(cudaMalloc(&d_src[i], iBytes));
CHECK(cudaMalloc(&d_rcv[i], iBytes));
CHECK(cudaMallocHost((void **) &h_src[i], iBytes));
CHECK(cudaStreamCreate(&stream[i]));
}
for (int i = 0; i < ngpus; i++)
{
initialData(h_src[i], iSize);
}
// unidirectional gmem copy
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
if (i % 2 == 0)
{
CHECK(cudaMemcpy(d_src[1], d_src[0], iBytes, cudaMemcpyDeviceToDevice));
}
else
{
CHECK(cudaMemcpy(d_src[0], d_src[1], iBytes, cudaMemcpyDeviceToDevice));
}
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsed_time_ms;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong unidirectional cudaMemcpy:\t %8.2f ms ", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)iBytes / (elapsed_time_ms * 1e6f));
// bidirectional asynchronous gmem copy
CHECK(cudaEventRecord(start, 0));
for (int i = 0; i < 100; i++)
{
CHECK(cudaMemcpyAsync(d_src[1], d_src[0], iBytes, cudaMemcpyDeviceToDevice, stream[0]));
CHECK(cudaMemcpyAsync(d_rcv[0], d_rcv[1], iBytes, cudaMemcpyDeviceToDevice, stream[1]));
}
CHECK(cudaSetDevice(0));
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
elapsed_time_ms = 0.0f;
CHECK(cudaEventElapsedTime(&elapsed_time_ms, start, stop));
elapsed_time_ms /= 100.0f;
printf("Ping-pong bidirectional cudaMemcpyAsync:\t %8.2f ms ", elapsed_time_ms);
printf("performance: %8.2f GB/s\n", (float)2.0f * iBytes / (elapsed_time_ms * 1e6f));
disableP2P(ngpus);
// free
CHECK(cudaSetDevice(0));
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
for (int i = 0; i < ngpus; i++)
{
CHECK(cudaSetDevice(i));
CHECK(cudaFree(d_src[i]));
CHECK(cudaFree(d_rcv[i]));
CHECK(cudaStreamDestroy(stream[i]));
CHECK(cudaDeviceReset());
}
exit(EXIT_SUCCESS);
}
|
b52f63991689b6ee0a99e50aa01befaa13d2b157.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. L2 has misses even when data size is less than 1024 * 384 (1.5m). So the eviction policy seems not to be LRU.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
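/*
 * Editor's note (hedged addition): with A[i] = (i + stride) % mod, the chase that
 * starts at index 0 walks a cycle of mod/stride distinct elements, one per
 * 128-byte line when stride = 32 ints, so its working set is roughly
 * mod * sizeof(int) bytes. The hypothetical helper below just makes that explicit.
 */
static inline size_t chase_footprint_bytes(int mod)
{
    return (size_t)mod * sizeof(int); // e.g. mod = 1024 * 384 gives 1572864 bytes = 1.5 MB (the Kepler L2 size)
}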
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, 512, B, C, D, B[0], clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m
for(int mod = 1024 * 384; mod <= 1024 * 384 + 32 * 64; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 512 * 1024 * 30;/////size = iteration * stride = 30 2mb pages.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(int) * data_size));
hipMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < 512; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
checkCudaErrors(hipFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| b52f63991689b6ee0a99e50aa01befaa13d2b157.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. L2 has misses even when data size is less than 1024 * 384 (1.5m). So the eviction policy seems not to be LRU.
//typedef unsigned char byte;
void init_cpu_data(int* A, int size, int stride, int mod){
for (int i = 0; i < size; ++i){
A[i]=(i + stride) % mod;
}
}
__device__ void P_chasing0(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
//long long int start_time = 0;//////clock
//long long int end_time = 0;//////clock
//start_time = clock64();//////clock
for (int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//long long int total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, int *A, int iterations, int *B, int *C, long long int *D, int starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ int s_index[1024 * 4];
//__shared__ int s_index[1];
int j = starting_index;///// keep the chase within the same page, so misses land on nearby cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(int *A, int iterations, int *B, int *C, long long int *D, float clock_rate, int mod, int data_stride){
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, 512, B, C, D, B[0], clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(int) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
for(int data_stride = 32; data_stride <= 32; data_stride = data_stride + 1){/////////stride shall be L1 cache line size.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m
for(int mod = 1024 * 384; mod <= 1024 * 384 + 32 * 64; mod = mod + 32){/////kepler L2 1.5m /////kepler L1 16KB ////////saturate the L1 not L2
///////////////////////////////////////////////////////////////////CPU data begin
int data_size = 512 * 1024 * 30;/////size = iteration * stride = 30 2mb pages.
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
int *CPU_data_in;
CPU_data_in = (int*)malloc(sizeof(int) * data_size);
init_cpu_data(CPU_data_in, data_size, data_stride, mod);
int *CPU_data_out_index;
CPU_data_out_index = (int*)malloc(sizeof(int) * iterations);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * iterations);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
int *GPU_data_in;
checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(int) * data_size));
cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(int) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
int *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(int) * iterations));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * iterations));
tlb_latency_test<<<1, 1>>>(GPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(int) * iterations, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * iterations, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%d##############%d\n", mod, (mod - 1024 * 4) / 32);
for (int it = 0; it < 512; it++){
fprintf (pFile, "%d %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
checkCudaErrors(cudaFree(GPU_data_in));
free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
4323d46b28f9fb2676f0e783e3d09fe3180bd9c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define N 10
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
c[tid] = a[tid] + b[tid];
}
}
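/*
 * Editor's sketch (hedged, not called by the original sample): the tid < N guard in
 * the kernel above only matters when the launch rounds the thread count up past N,
 * as a multi-block configuration does.
 */
static inline void launch_add_multiblock(int *dev_a, int *dev_b, int *dev_c) {
    const int threadsPerBlock = 256;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; // round up so every element is covered
    hipLaunchKernelGGL(add, dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c);
}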
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **) &dev_a, N * sizeof(int));
hipMalloc((void **) &dev_b, N * sizeof(int));
hipMalloc((void **) &dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = i * 2;
}
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add) , dim3(1), dim3(N), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d+%d=%d\n", a[i], b[i], c[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | 4323d46b28f9fb2676f0e783e3d09fe3180bd9c2.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define N 10
__global__ void add(int *a, int *b, int *c) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
c[tid] = a[tid] + b[tid];
}
}
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **) &dev_a, N * sizeof(int));
cudaMalloc((void **) &dev_b, N * sizeof(int));
cudaMalloc((void **) &dev_c, N * sizeof(int));
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = i * 2;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, N * sizeof(int), cudaMemcpyHostToDevice);
add <<<1, N>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d+%d=%d\n", a[i], b[i], c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
7ad50e8265dc638633db87d77b1e0a94d9a7c919.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#define N 100
#define DIM 2
char le_entrada();
char inicializa_parametros();
double * aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
//char calcula_pressao_velocidade(int, int, int, int, int);
//char atualiza_mult_lagrange(int tid);
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
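/*
 * Editor's sketch (hypothetical usage, not taken from the original source): the two
 * macros above are meant to wrap HIP calls and host allocations, for example
 *
 *   HANDLE_ERROR( hipMalloc((void **)&dev_pressao, tam_mat_real * tam_mat_real * sizeof(double)) );
 *   double *buf = (double *)malloc(tam_mat_real * tam_mat_real * sizeof(double)); HANDLE_NULL(buf);
 */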
//- - - - - - - - - - - - - - GLOBALS - - - - - - - - - - - - - - //
/* - - - - - - - External inputs - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
double tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
double h = 20000.00 / 3; // CELL HEIGHT H = TAM_REGIAO / TAM_MAT_INTERNA
double *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - End of external inputs - - - - - - - */
/* - - - - - - - Host (CPU) pointers - - - - - - - */
double *q_R = NULL, *q_L = NULL, *q_U = NULL, *q_D = NULL;
double *q_R_old = NULL, *q_L_old = NULL, *q_U_old = NULL, *q_D_old = NULL;
double *l_R = NULL, *l_L = NULL, *l_U = NULL, *l_D = NULL;
double *l_R_old = NULL, *l_L_old = NULL, *l_U_old = NULL, *l_D_old = NULL;
double *b_R = NULL, *b_L = NULL, *b_U = NULL, *b_D = NULL;
double *b_R_old = NULL, *b_L_old = NULL, *b_U_old = NULL, *b_D_old = NULL;
double *pressao = NULL, *pressao_old = NULL;
/* - - - - - - - Device (GPU) pointers - - - - - - - */
double *dev_mat_perm = NULL, *dev_mat_font = NULL, *dev_mat_epsilon = NULL;
double *dev_q_R = NULL, *dev_q_L = NULL, *dev_q_U = NULL, *dev_q_D = NULL;
double *dev_q_R_old = NULL, *dev_q_L_old = NULL, *dev_q_U_old = NULL, *dev_q_D_old = NULL;
double *dev_l_R = NULL, *dev_l_L = NULL, *dev_l_U = NULL, *dev_l_D = NULL;
double *dev_l_R_old = NULL, *dev_l_L_old = NULL, *dev_l_U_old = NULL, *dev_l_D_old = NULL;
double *dev_b_R = NULL, *dev_b_L = NULL, *dev_b_U = NULL, *dev_b_D = NULL;
double *dev_b_R_old = NULL, *dev_b_L_old = NULL, *dev_b_U_old = NULL, *dev_b_D_old = NULL;
double *dev_pressao = NULL, *dev_pressao_old = NULL;
double *dev_aux = NULL, *dev_erro = NULL, *dev_media = NULL, *dev_sum1 = NULL, *dev_sum2 = NULL;
//- - - - - - - - - - - - - - END - GLOBALS - - - - - - - - - - - - - - //
__device__ char atualiza_mult_lagrange(int tid,
double *dev_l_U, double *dev_l_D, double *dev_l_R, double *dev_l_L,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset - 1) = kernel width
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
dev_l_U[index_mem_central] = dev_b_U[index_mem_central] * (dev_q_U[index_mem_central] + dev_q_D_old[index_mem_uper]) + dev_l_D_old[index_mem_uper];
dev_l_D[index_mem_central] = dev_b_D[index_mem_central] * (dev_q_D[index_mem_central] + dev_q_U_old[index_mem_down]) + dev_l_U_old[index_mem_down];
dev_l_R[index_mem_central] = dev_b_R[index_mem_central] * (dev_q_R[index_mem_central] + dev_q_L_old[index_mem_right]) + dev_l_L_old[index_mem_right];
dev_l_L[index_mem_central] = dev_b_L[index_mem_central] * (dev_q_L[index_mem_central] + dev_q_R_old[index_mem_left]) + dev_l_R_old[index_mem_left];
return 0;
}
__device__ char calcula_pressao_velocidade(int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao, double *dev_mat_font,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
){
double auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset - 1) = kernel width
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
if(uper == 1){
auxU = dev_mat_epsilon[index_mem_central] / (1 + dev_b_U[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DU = auxU * (dev_b_U[index_mem_central] * dev_q_D_old[index_mem_uper] + dev_l_D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat_epsilon[index_mem_central] / (1 + dev_b_R[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DR = auxR * (dev_b_R[index_mem_central] * dev_q_L_old[index_mem_right] + dev_l_L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat_epsilon[index_mem_central] / (1 + dev_b_D[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DD = auxD * (dev_b_D[index_mem_central] * dev_q_U_old[index_mem_down] + dev_l_U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat_epsilon[index_mem_central] / (1 + dev_b_L[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DL = auxL * (dev_b_L[index_mem_central] * dev_q_R_old[index_mem_left] + dev_l_R_old[index_mem_left]);
}
dev_pressao[index_mem_central] = (dev_mat_font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q_L[index_mem_central] = auxL * dev_pressao[index_mem_central] - DL;
dev_q_R[index_mem_central] = auxR * dev_pressao[index_mem_central] - DR;
dev_q_U[index_mem_central] = auxU * dev_pressao[index_mem_central] - DU;
dev_q_D[index_mem_central] = auxD * dev_pressao[index_mem_central] - DD;
return 0;
}
__global__ void escoamento_monofasico(
double *dev_mat_perm, double *dev_mat_font, double *dev_mat_epsilon,
double *dev_q_R, double *dev_q_L, double *dev_q_U, double *dev_q_D,
double *dev_q_R_old, double *dev_q_L_old, double *dev_q_U_old, double *dev_q_D_old,
double *dev_l_R, double *dev_l_L, double *dev_l_U, double *dev_l_D,
double *dev_l_R_old, double *dev_l_L_old, double *dev_l_U_old, double *dev_l_D_old,
double *dev_b_R, double *dev_b_L, double *dev_b_U, double *dev_b_D,
double *dev_b_R_old, double *dev_b_L_old, double *dev_b_U_old, double *dev_b_D_old,
double *dev_pressao, double *dev_pressao_old,
double *dev_aux, double dev_erro, double dev_media, double dev_sum1, double dev_sum2
){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/* check the boundary conditions */
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // offset to the padded region width (n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//check this offset so it does not cause problems (use the offset only when storing)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid gives the index into the vector
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // index of the top-right corner
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // index of the bottom-right corner
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // index of the bottom-left corner
if(tid == 0){//top-left corner
/*CHECK THE BOUNDARY CONDITIONS*/
/*
* calcula_pressao_velocidade();
*
* Param: int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao,double *dev_mat_font,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
/*
*
* calcula_pressao_velocidade();
*
* param: int tid,
double *dev_l_U, double *dev_l_D, double *dev_l_R, double *dev_l_L,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){//top-right corner
/*CHECK THE BOUNDARY CONDITIONS*/
/*
* calcula_pressao_velocidade();
*
* Param: int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
calcula_pressao_velocidade( tid, 0, 0, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){//bottom-left corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){//bottom-right corner
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 1, 0, 0, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){//top boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 0, 1, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ //right boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 1, 0, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //bottom boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 1, 1, 0, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){//left boundary
/*CHECK THE BOUNDARY CONDITIONS*/
calcula_pressao_velocidade( tid, 1, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 1, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
}
/*
*
*SINCRONIZA
*COMENTARIOS
*ALOCAR VARIÁVEL aux com o tamanho de "tids"
*VERIFICAR ATOMICIDADE PRA VALORES FLOAT
*VERIFICAR ALOCAÇÃO DAS MEMÓRIAS GLOBAIS
*alocar memória erro
*alocar double media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao[i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
dev_pressao[tid] -= dev_media;
dev_l_D[tid] -= dev_media;
dev_l_U[tid] -= dev_media;
dev_l_L[tid] -= dev_media;
dev_l_R[tid] -= dev_media;
//avaliando criterio de convergencia
dev_aux[tid] = dev_pressao[tid] - dev_pressao_old[tid];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[i] * dev_aux[i];
dev_sum2 += dev_pressao[i] * dev_pressao[i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro < 1e-5)
return;
// break;
dev_pressao_old[tid] = dev_pressao[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];
/*
* Imponiendo a media cero na distribuicao de presiones
* Calculo de la media
*/
/*
atomicAdd( &media, dev_pressao[tid] );
//atomicSub( &aux[tid], dev_pressao[tid] - dev_pressao_old[tid] );
__syncthreads();
dev_pressao[tid] -= M;
dev_l_D[tid] -= M;
dev_l_U[tid] -= M;
dev_l_L[tid] -= M;
dev_l_R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao[tid] - dev_b_D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao[tid] * dev_pressao[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao_old[tid] = dev_pressao_old[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];*/
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
int i = 0, j = 0;
/*
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %lf\n", tam_regiao);
printf("erro_max = %lf\n", erro_max);
printf("valor_contor = %lf\n", valor_contor);
printf("\n\nmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_epsilon[i*tam_mat_real + j]);
printf("\n");
}
*/
printf("\n\nb_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_L[i*tam_mat_real + j]);
printf("\n");
}
system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%lf", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%lf", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%lf", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//uso (tam_mat_interna + 2) - pois ainda não inicializei 'tam_mat_real'
mat_font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
mat_perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
mat_epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
q_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao_old == NULL)
HANDLE_ERROR( hipMalloc( (void**)&dev_pressao_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_erro, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_erro, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_media, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_media, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_sum1, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_sum1, 0, sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_sum2, sizeof(double) ) );
HANDLE_ERROR( hipMemset( dev_sum2, 0, sizeof(double) ) );
int i = 0;
switch(op_contorno){
case 1: //Inicializa contorno superior
for(i = 0; i < tam_mat_real -1; i++){
q_D[i] = valor_contor;
q_D_old[i] = valor_contor;
}
break;
case 2://Inicializa contorno esquerdo
for(i = 0; i < tam_mat_real; i++){
q_R[i*tam_mat_real] = valor_contor;
q_R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3://Inicializa contorno direito
for(i = 0; i < tam_mat_real; i++){
q_L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
q_L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4://Inicializa contorno inferior
for(i = 0; i < tam_mat_real; i++){
q_L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
q_L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
double * aloca_matriz(int L, int C){
double *aux = NULL;
aux = (double *) calloc(L * C, sizeof(double));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return aux;
}
return NULL;
}
/*
*
*VERIFICAR RETORNO
*
*/
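/*
 * Robin interface coefficients: for every cell (corners, borders and the
 * interior) keff is the harmonic mean 2*k1*k2/(k1+k2) of the cell's
 * permeability and that of the neighbour across the face, and the face
 * coefficient is set to beta = C*h/keff (stored in b_U/b_D/b_L/b_R).
 */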
void cal_cond_robin(){
double keff = 0.0, numerador = 0.0, denominador = 0.0;
double C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0
//Canto superior esquerdo
numerador = ( 2 * mat_perm[tam_mat_real + 1] * mat_perm[tam_mat_real + 2] );
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[tam_mat_real + 2] );
keff = numerador / denominador;
b_R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * mat_perm[tam_mat_real + 1] * mat_perm[(2*tam_mat_real) + 1]);
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
b_D[tam_mat_real + 1] = C*h/keff;
//Canto superior direito
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[tam_mat_real + tam_mat_interna] = C*h/keff;
//Canto inferior esquerdo
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
b_R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//Canto inferior direito
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
//Calculo das fronteiras e região interna para betas
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
//Calcula fronteira superior
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i-1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
b_L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i+1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
b_R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[(2 * tam_mat_real) + i] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
b_D[tam_mat_real + i] = C*h/keff;
//Calcula fronteira esquerda
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[(i * tam_mat_real) + 2] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + 1] = C*h/keff;
//Calcula fronteira inferior
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
b_U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
b_R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
//Calcula fronteira direita
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
//Calcula dados internos
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i - 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i + 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
/*
*
*VERIFICAR RETORNO
*
*/
char parametro_independentes(){
int i = 0, j = 0;
double constante = 2/h;
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
mat_epsilon[i*tam_mat_real + j] = constante * mat_perm[i*tam_mat_real + j];
mat_font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
HANDLE_ERROR( hipMemcpy( dev_q_R, q_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_L, q_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_U, q_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_D, q_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_R_old, q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_L_old, q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_U_old, q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_q_D_old, q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_R, l_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_L, l_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_U, l_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_D, l_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_R_old, l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_L_old, l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_U_old, l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_l_D_old, l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_R, b_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_L, b_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_U, b_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_D, b_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_R_old, b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_L_old, b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_U_old, b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b_D_old, b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao, pressao, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_pressao_old, pressao_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_font, mat_font, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_perm, mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_mat_epsilon, mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyHostToDevice ) );
return 0;
}
void copia_dados_para_cpu(){
HANDLE_ERROR( hipMemcpy( q_R, dev_q_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_L, dev_q_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_U, dev_q_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_D, dev_q_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_R_old, dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_L_old, dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_U_old, dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( q_D_old, dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_R, dev_l_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_L, dev_l_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_U, dev_l_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_D, dev_l_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_R_old, dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_L_old, dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_U_old, dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( l_D_old, dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_R, dev_b_R, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_L, dev_b_L, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_U, dev_b_U, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_D, dev_b_D, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_R_old, dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_L_old, dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_U_old, dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( b_D_old, dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_font, dev_mat_font, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_perm, dev_mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( mat_epsilon, dev_mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
hipMemcpyDeviceToHost ) );
} | 7ad50e8265dc638633db87d77b1e0a94d9a7c919.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#define N 100
#define DIM 2
char le_entrada();
char inicializa_parametros();
double * aloca_matriz(int, int);
void cal_cond_robin();
char parametro_independentes();
char copia_dados_para_gpu();
void copia_dados_para_cpu();
//char calcula_pressao_velocidade(int, int, int, int, int);
//char atualiza_mult_lagrange(int tid);
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
//- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - //
/* - - - - - - - Entradas Externas - - - - - - - */
int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1;
double tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00;
double h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA
double *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL;
/* - - - - - - - Fim das Entradas Externas - - - - - - - */
/* - - - - - - - Ponteiros para CPU - - - - - - - */
double *q_R = NULL, *q_L = NULL, *q_U = NULL, *q_D = NULL;
double *q_R_old = NULL, *q_L_old = NULL, *q_U_old = NULL, *q_D_old = NULL;
double *l_R = NULL, *l_L = NULL, *l_U = NULL, *l_D = NULL;
double *l_R_old = NULL, *l_L_old = NULL, *l_U_old = NULL, *l_D_old = NULL;
double *b_R = NULL, *b_L = NULL, *b_U = NULL, *b_D = NULL;
double *b_R_old = NULL, *b_L_old = NULL, *b_U_old = NULL, *b_D_old = NULL;
double *pressao = NULL, *pressao_old = NULL;
/* - - - - - - - Ponteiros para GPU - - - - - - - */
double *dev_mat_perm = NULL, *dev_mat_font = NULL, *dev_mat_epsilon = NULL;
double *dev_q_R = NULL, *dev_q_L = NULL, *dev_q_U = NULL, *dev_q_D = NULL;
double *dev_q_R_old = NULL, *dev_q_L_old = NULL, *dev_q_U_old = NULL, *dev_q_D_old = NULL;
double *dev_l_R = NULL, *dev_l_L = NULL, *dev_l_U = NULL, *dev_l_D = NULL;
double *dev_l_R_old = NULL, *dev_l_L_old = NULL, *dev_l_U_old = NULL, *dev_l_D_old = NULL;
double *dev_b_R = NULL, *dev_b_L = NULL, *dev_b_U = NULL, *dev_b_D = NULL;
double *dev_b_R_old = NULL, *dev_b_L_old = NULL, *dev_b_U_old = NULL, *dev_b_D_old = NULL;
double *dev_pressao = NULL, *dev_pressao_old = NULL;
double *dev_aux = NULL, *dev_erro = NULL, *dev_media = NULL, *dev_sum1 = NULL, *dev_sum2 = NULL;
//- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - //
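/*
 * Rebuilds the four face Lagrange multipliers of the cell owned by this thread
 * from the opposite face of each neighbouring cell, e.g.
 * l_U = b_U*(q_U + q_D_old[cell above]) + l_D_old[cell above], and likewise
 * for the D, R and L faces.
 */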
__device__ char atualiza_mult_lagrange(int tid,
double *dev_l_U, double *dev_l_D, double *dev_l_R, double *dev_l_L,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
){
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
dev_l_U[index_mem_central] = dev_b_U[index_mem_central] * (dev_q_U[index_mem_central] + dev_q_D_old[index_mem_uper]) + dev_l_D_old[index_mem_uper];
dev_l_D[index_mem_central] = dev_b_D[index_mem_central] * (dev_q_D[index_mem_central] + dev_q_U_old[index_mem_down]) + dev_l_U_old[index_mem_down];
dev_l_R[index_mem_central] = dev_b_R[index_mem_central] * (dev_q_R[index_mem_central] + dev_q_L_old[index_mem_right]) + dev_l_L_old[index_mem_right];
dev_l_L[index_mem_central] = dev_b_L[index_mem_central] * (dev_q_L[index_mem_central] + dev_q_R_old[index_mem_left]) + dev_l_R_old[index_mem_left];
return 0;
}
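/*
 * Cell update: for each face enabled by the uper/right/down/left flags,
 * aux = epsilon/(1 + b*epsilon) and D = aux*(b*q_old + l_old) taken from the
 * opposite face of the neighbouring cell; the new pressure is
 * p = (font + sum D)/(sum aux) and each face flux becomes q = aux*p - D.
 */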
__device__ char calcula_pressao_velocidade(int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao, double *dev_mat_font,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
){
double auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0;
int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0;
int index_mem_left = 0, index_mem_right = 0;
int comprimento_kernel = blockDim.x * gridDim.x;
int offset = (blockDim.x * gridDim.x) + 1;
index_mem_central = tid + ((tid/comprimento_kernel)*2) + offset;
index_mem_uper = index_mem_central - (offset -1); // (offset -1) = comprimento do kernel
index_mem_down = index_mem_central + (offset -1);
index_mem_left = index_mem_central - 1;
index_mem_right = index_mem_central + 1;
if(uper == 1){
auxU = dev_mat_epsilon[index_mem_central] / (1 + dev_b_U[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DU = auxU * (dev_b_U[index_mem_central] * dev_q_D_old[index_mem_uper] + dev_l_D_old[index_mem_uper]);
}
if(right == 1){
auxR = dev_mat_epsilon[index_mem_central] / (1 + dev_b_R[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DR = auxR * (dev_b_R[index_mem_central] * dev_q_L_old[index_mem_right] + dev_l_L_old[index_mem_right]);
}
if(down == 1){
auxD = dev_mat_epsilon[index_mem_central] / (1 + dev_b_D[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DD = auxD * (dev_b_D[index_mem_central] * dev_q_U_old[index_mem_down] + dev_l_U_old[index_mem_down]);
}
if(left == 1){
auxL = dev_mat_epsilon[index_mem_central] / (1 + dev_b_L[index_mem_central] * dev_mat_epsilon[index_mem_central]);
DL = auxL * (dev_b_L[index_mem_central] * dev_q_R_old[index_mem_left] + dev_l_R_old[index_mem_left]);
}
dev_pressao[index_mem_central] = (dev_mat_font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL);
dev_q_L[index_mem_central] = auxL * dev_pressao[index_mem_central] - DL;
dev_q_R[index_mem_central] = auxR * dev_pressao[index_mem_central] - DR;
dev_q_U[index_mem_central] = auxU * dev_pressao[index_mem_central] - DU;
dev_q_D[index_mem_central] = auxD * dev_pressao[index_mem_central] - DD;
return 0;
}
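/*
 * One thread per interior cell. The index arithmetic below (eq_tid_* and the
 * offsets used in the device functions) relies on the launch configuration
 * covering exactly the interior mesh, i.e. blockDim.x*gridDim.x ==
 * blockDim.y*gridDim.y == tam_mat_interna. Corner, border and interior threads
 * enable different neighbour faces; afterwards the pressure field is shifted
 * to zero mean and the relative change between iterations (dev_erro) is used
 * as the convergence measure.
 */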
__global__ void escoamento_monofasico(
double *dev_mat_perm, double *dev_mat_font, double *dev_mat_epsilon,
double *dev_q_R, double *dev_q_L, double *dev_q_U, double *dev_q_D,
double *dev_q_R_old, double *dev_q_L_old, double *dev_q_U_old, double *dev_q_D_old,
double *dev_l_R, double *dev_l_L, double *dev_l_U, double *dev_l_D,
double *dev_l_R_old, double *dev_l_L_old, double *dev_l_U_old, double *dev_l_D_old,
double *dev_b_R, double *dev_b_L, double *dev_b_U, double *dev_b_D,
double *dev_b_R_old, double *dev_b_L_old, double *dev_b_U_old, double *dev_b_D_old,
double *dev_pressao, double *dev_pressao_old,
double *dev_aux, double dev_erro, double dev_media, double dev_sum1, double dev_sum2
){
/*int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
a[offset] = offset;*/
/*verificar as condições de contorno*/
int flag_thread_centrais = 1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
/*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da região (tam_regiao = n + 2)
*/
int tid = x + y * blockDim.x * gridDim.x;
//verificar esse deslocamento para não causar problema (somente na hora de armazenar utilizar o deslocamento)
//int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor
int dimensao_x = blockDim.x * gridDim.x;
int dimensao_y = blockDim.y * gridDim.y;
int eq_tid_cant_sup_dir = blockDim.x * gridDim.x - 1; // posição extremo sup direito
int eq_tid_cant_inf_dir = ((gridDim.x * blockDim.x) * (gridDim.y * blockDim.y)) - 1; // posição extremo inf direito
int eq_tid_cant_inf_esq = (gridDim.x * blockDim.x) * (gridDim.y * blockDim.y - 1); // posição extremo inf esquerdo
if(tid == 0){//canto superior esquerdo
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
/*
* calcula_pressao_velocidade();
*
* Param: int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao,double *dev_mat_font,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
/*
*
* calcula_pressao_velocidade();
*
* param: int tid,
double *dev_l_U, double *dev_l_D, double *dev_l_R, double *dev_l_L,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U, double *dev_q_D, double *dev_q_R, double *dev_q_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_sup_dir){//canto superior direito
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
/*
* calcula_pressao_velocidade();
*
* Param: int tid, int uper, int right, int down, int left,
double *dev_mat_epsilon, double *dev_pressao,
double *dev_l_U_old, double *dev_l_D_old, double *dev_l_R_old, double *dev_l_L_old,
double *dev_b_U, double *dev_b_D, double *dev_b_R, double *dev_b_L,
double *dev_q_U_old, double *dev_q_D_old, double *dev_q_R_old, double *dev_q_L_old
*
*/
calcula_pressao_velocidade( tid, 0, 0, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_esq){//canto inferior esquerdo
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 0, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(tid == eq_tid_cant_inf_dir){//canto inferior direito
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 0, 0, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_sup_dir)){//fronteira superior
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 0, 1, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == eq_tid_cant_sup_dir)){ //fronteira direita
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 0, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 1, 0, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if((tid > 0) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_y == 0)){//fronteira esquerda
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 1, 1, 0,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
flag_thread_centrais = 0;
}
if(flag_thread_centrais){
/*VERIFICAR AS CONDIÇÕES DE CONTORNO*/
calcula_pressao_velocidade( tid, 1, 1, 1, 1,
dev_mat_epsilon, dev_pressao, dev_mat_font,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old,dev_q_L_old);
atualiza_mult_lagrange( tid,
dev_l_U, dev_l_D, dev_l_R, dev_l_L,
dev_l_U_old, dev_l_D_old, dev_l_R_old, dev_l_L_old,
dev_b_U, dev_b_D, dev_b_R, dev_b_L,
dev_q_U, dev_q_D, dev_q_R, dev_q_L,
dev_q_U_old, dev_q_D_old, dev_q_R_old, dev_q_L_old);
}
/*
*
*SINCRONIZA
*COMENTARIOS
*ALOCAR VARIÁVEL aux com o tamanho de "tids"
*VERIFICAR ATOMICIDADE PRA VALORES FLOAT
*VERIFICAR ALOCAÇÃO DAS MEMÓRIAS GLOBAIS
*alocar memória erro
*alocar double media = 0.0, sum1 = 0.0, sum2 = 0.0;
*/
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_media = dev_media + dev_pressao[i];
}
dev_media = dev_media / (eq_tid_cant_inf_dir + 1);
}
__syncthreads();
dev_pressao[tid] -= dev_media;
dev_l_D[tid] -= dev_media;
dev_l_U[tid] -= dev_media;
dev_l_L[tid] -= dev_media;
dev_l_R[tid] -= dev_media;
//avaliando criterio de convergencia
dev_aux[tid] = dev_pressao[tid] - dev_pressao_old[tid];
__syncthreads();
if(tid == eq_tid_cant_inf_dir){
int i = 0;
for(i = 0; i <= eq_tid_cant_inf_dir; i++){
dev_sum1 += dev_aux[i] * dev_aux[i];
dev_sum2 += dev_pressao[i] * dev_pressao[i];
}
dev_erro = sqrt(dev_sum1/dev_sum2);
}
__syncthreads();
if (dev_erro < 1e-5)
return;
// break;
dev_pressao_old[tid] = dev_pressao[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];
/*
* Imponiendo a media cero na distribuicao de presiones
* Calculo de la media
*/
/*
atomicAdd( &media, dev_pressao[tid] );
//atomicSub( &aux[tid], dev_pressao[tid] - dev_pressao_old[tid] );
__syncthreads();
dev_pressao[tid] -= M;
dev_l_D[tid] -= M;
dev_l_U[tid] -= M;
dev_l_L[tid] -= M;
dev_l_R[tid] -= M;
//avaliando criterio de convergencia
aux[tid] = dev_pressao[tid] - dev_b_D_old[tid];
__syncthreads();
atomicAdd( &sum1, aux[tid] * aux[tid] );
atomicAdd( &sum2, dev_pressao[tid] * dev_pressao[tid] );
__syncthreads();
if(tid == 0)
erro = sqrt(sum1/sum2);
if (erro < 1e-5) return 0;
p_old[j][k] = p[j][k];
dev_pressao_old[tid] = dev_pressao_old[tid];
dev_q_U_old[tid] = dev_q_U[tid];
dev_q_R_old[tid] = dev_q_R[tid];
dev_q_L_old[tid] = dev_q_L[tid];
dev_q_D_old[tid] = dev_q_D[tid];
dev_l_D_old[tid] = dev_l_D[tid];
dev_l_U_old[tid] = dev_l_U[tid];
dev_l_L_old[tid] = dev_l_L[tid];
dev_l_R_old[tid] = dev_l_R[tid];*/
}
int main(void){
le_entrada();
inicializa_parametros();
cal_cond_robin();
parametro_independentes();
int i = 0, j = 0;
/*
printf("\ntam_mat_interna = %d\n", tam_mat_interna);
printf("tam_mat_real = %d\n", tam_mat_real);
printf("max_interacoes = %d\n", max_interacoes);
printf("op_contorno = %d\n", op_contorno);
printf("tam_regiao = %lf\n", tam_regiao);
printf("erro_max = %lf\n", erro_max);
printf("valor_contor = %lf\n", valor_contor);
printf("\n\nmat_font:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_font[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_perm:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_perm[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nmat_epsilon:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", mat_epsilon[i*tam_mat_real + j]);
printf("\n");
}
*/
printf("\n\nb_U:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_U[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_R:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_R[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_D:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_D[i*tam_mat_real + j]);
printf("\n");
}
printf("\n\nb_L:\n");
for(i = 0; i < tam_mat_real; i ++){
for(j = 0; j < tam_mat_real; j++)
printf("%1.e ", b_L[i*tam_mat_real + j]);
printf("\n");
}
system("pause");
return 0;
}
char le_entrada(){
printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n");
FILE *arq = NULL;
arq = fopen("../dir_entrada/parametro_entrada.txt", "r");
if(arq == NULL){
printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n");
exit(1);
}
else{
printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n");
/*char c[2], dados[255], buffer[255];*/
char buffer[255];
int cont = 1;
while(cont < 9){
fscanf(arq, "%s", buffer);
//puts(buffer);
int i = 0, j = 0;
switch(strlen(buffer)){
case 8: //erro_maximo
fscanf(arq, "%lf", &erro_max);
break;
case 10: //tam_regiao
fscanf(arq, "%lf", &tam_regiao);
break;
case 11: //opcao_contorno
fscanf(arq, "%d", &op_contorno);
break;
case 12: //valor_contor
fscanf(arq, "%lf", &valor_contor);
break;
case 14: //max_interacoes
fscanf(arq, "%d", &max_interacoes);
break;
case 15: //tam_mat_interna
fscanf(arq, "%d", &tam_mat_interna);
break;
case 16: //matriz_de_fontes
//uso (tam_mat_interna + 2) - pois ainda não inicializei 'tam_mat_real'
mat_font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_font[i*(tam_mat_interna+2) + j]);
break;
case 18: //matriz_permeabilidade
mat_perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
mat_epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2);
for(i = 1; i < (tam_mat_interna + 2) - 1; i ++)
for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++)
fscanf(arq, "%lf", &mat_perm[i*(tam_mat_interna+2) + j]);
break;
default:
printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n");
return 0;
}
//int tam = strlen(buffer);
cont++;
}
printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n");
}
printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n");
return 1;
}
char inicializa_parametros(){
printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n");
tam_mat_real = tam_mat_interna + 2;
h = tam_regiao / tam_mat_interna;
q_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
q_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(q_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
l_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(l_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_R, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_L, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_U, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_D, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_R_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_R_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_L_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_L_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_U_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_U_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
b_D_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(b_D_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao, tam_mat_real * tam_mat_real * sizeof(double) ) );
pressao_old = aloca_matriz(tam_mat_real, tam_mat_real);
if(pressao_old == NULL)
HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao_old, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_aux, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_aux, 0, tam_mat_real * tam_mat_real * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_erro, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_erro, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_media, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_media, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_sum1, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_sum1, 0, sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_sum2, sizeof(double) ) );
HANDLE_ERROR( cudaMemset( dev_sum2, 0, sizeof(double) ) );
int i = 0;
switch(op_contorno){
case 1: //Inicializa contorno superior
for(i = 0; i < tam_mat_real -1; i++){
q_D[i] = valor_contor;
q_D_old[i] = valor_contor;
}
break;
case 2://Inicializa contorno esquerdo
for(i = 0; i < tam_mat_real; i++){
q_R[i*tam_mat_real] = valor_contor;
q_R_old[i*tam_mat_real] = valor_contor;
}
break;
case 3://Inicializa contorno direito
for(i = 0; i < tam_mat_real; i++){
q_L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
q_L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor;
}
break;
case 4://Inicializa contorno inferior
for(i = 0; i < tam_mat_real; i++){
q_L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
q_L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor;
}
break;
default:
printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n");
break;
}
printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n");
return 1;
}
double * aloca_matriz(int L, int C){
double *aux = NULL;
aux = (double *) calloc(L * C, sizeof(double));
if(aux == NULL){
printf("\n\n\t\tErro ao alocar memoria\n\n");
exit(1);
}else{
return aux;
}
return NULL;
}
/*
*
*VERIFICAR RETORNO
*
*/
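/*
 * Robin interface coefficients: for every cell (corners, borders and the
 * interior) keff is the harmonic mean 2*k1*k2/(k1+k2) of the cell's
 * permeability and that of the neighbour across the face, and the face
 * coefficient is set to beta = C*h/keff (stored in b_U/b_D/b_L/b_R).
 */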
void cal_cond_robin(){
double keff = 0.0, numerador = 0.0, denominador = 0.0;
double C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0
//Canto superior esquerdo
numerador = ( 2 * mat_perm[tam_mat_real + 1] * mat_perm[tam_mat_real + 2] );
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[tam_mat_real + 2] );
keff = numerador / denominador;
b_R[tam_mat_real + 1] = C*h/keff;
numerador = (2 * mat_perm[tam_mat_real + 1] * mat_perm[(2*tam_mat_real) + 1]);
denominador = ( mat_perm[tam_mat_real + 1] + mat_perm[(2*tam_mat_real) + 1]);
keff = numerador / denominador;
b_D[tam_mat_real + 1] = C*h/keff;
//Canto superior direito
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[tam_mat_real + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[tam_mat_real + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + tam_mat_interna] * mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[tam_mat_real + tam_mat_interna] + mat_perm[(2 * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[tam_mat_real + tam_mat_interna] = C*h/keff;
//Canto inferior esquerdo
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + 1] * mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + 1] + mat_perm[(tam_mat_real * tam_mat_interna) + 2] );
keff = numerador / denominador;
b_R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff;
//Canto inferior direito
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + mat_perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff;
//Compute the betas on the boundaries and in the interior region
int i = 0;
for(i = 2; i < tam_mat_interna; i ++){
//Compute the top boundary
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i-1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i-1)] );
keff = numerador / denominador;
b_L[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[tam_mat_real + (i+1)] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[tam_mat_real + (i+1)] );
keff = numerador / denominador;
b_R[tam_mat_real + i] = C*h/keff;
numerador = ( 2 * mat_perm[tam_mat_real + i] * mat_perm[(2 * tam_mat_real) + i] );
denominador = ( mat_perm[tam_mat_real + i] + mat_perm[(2 * tam_mat_real) + i] );
keff = numerador / denominador;
b_D[tam_mat_real + i] = C*h/keff;
//Compute the left boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i - 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i - 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[(i * tam_mat_real) + 2] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[(i * tam_mat_real) + 2] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + 1] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + 1] * mat_perm[((i + 1) * tam_mat_real) + 1] );
denominador = ( mat_perm[(i * tam_mat_real) + 1] + mat_perm[((i + 1) * tam_mat_real) + 1] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + 1] = C*h/keff;
//Compute the bottom boundary
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i - 1)] );
keff = numerador / denominador;
b_L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[((tam_mat_interna - 1) * tam_mat_real) + i] );
keff = numerador / denominador;
b_U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
numerador = ( 2 * mat_perm[(tam_mat_interna * tam_mat_real) + i] * mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
denominador = ( mat_perm[(tam_mat_interna * tam_mat_real) + i] + mat_perm[(tam_mat_interna * tam_mat_real) + (i + 1)] );
keff = numerador / denominador;
b_R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff;
//Compute the right boundary
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i-1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[(i * tam_mat_real) + (tam_mat_interna - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + tam_mat_interna] * mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
denominador = ( mat_perm[(i * tam_mat_real) + tam_mat_interna] + mat_perm[((i+1) * tam_mat_real) + tam_mat_interna] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff;
//Compute the interior cells
int j = 0;
for(j = 2; j < tam_mat_interna; j ++){
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j - 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j - 1)] );
keff = numerador / denominador;
b_L[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[(i * tam_mat_real) + (j + 1)] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[(i * tam_mat_real) + (j + 1)] );
keff = numerador / denominador;
b_R[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i - 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i - 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_U[(i * tam_mat_real) + j] = C*h/keff;
numerador = ( 2 * mat_perm[(i * tam_mat_real) + j] * mat_perm[((i + 1) * tam_mat_real) + j] );
denominador = ( mat_perm[(i * tam_mat_real) + j] + mat_perm[((i + 1) * tam_mat_real) + j] );
keff = numerador / denominador;
b_D[(i * tam_mat_real) + j] = C*h/keff;
}
}
}
/*
*
*CHECK THE RETURN VALUE
*
*/
char parametro_independentes(){
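// Precompute the per-cell coefficient epsilon = (2/h) * permeability and scale the source term by h.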
int i = 0, j = 0;
double constante = 2/h;
for(i = 0; i < tam_mat_real; i ++)
for(j = 0; j < tam_mat_real; j++){
mat_epsilon[i*tam_mat_real + j] = constante * mat_perm[i*tam_mat_real + j];
mat_font[i*tam_mat_real + j] *= h;
}
return 0;
}
char copia_dados_para_gpu(){
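// Copy every host array (q_*, l_*, b_*, pressure, source term, permeability and epsilon) to its device counterpart.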
HANDLE_ERROR( cudaMemcpy( dev_q_R, q_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_L, q_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_U, q_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_D, q_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_R_old, q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_L_old, q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_U_old, q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_q_D_old, q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_R, l_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_L, l_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_U, l_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_D, l_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_R_old, l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_L_old, l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_U_old, l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_l_D_old, l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_R, b_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_L, b_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_U, b_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_D, b_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_R_old, b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_L_old, b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_U_old, b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_b_D_old, b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao, pressao, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_pressao_old, pressao_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_font, mat_font, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_perm, mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( dev_mat_epsilon, mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyHostToDevice ) );
return 0;
}
void copia_dados_para_cpu(){
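// Copy the device arrays back to their host counterparts.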
HANDLE_ERROR( cudaMemcpy( q_R, dev_q_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_L, dev_q_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_U, dev_q_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_D, dev_q_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_R_old, dev_q_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_L_old, dev_q_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_U_old, dev_q_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( q_D_old, dev_q_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_R, dev_l_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_L, dev_l_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_U, dev_l_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_D, dev_l_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_R_old, dev_l_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_L_old, dev_l_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_U_old, dev_l_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( l_D_old, dev_l_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_R, dev_b_R, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_L, dev_b_L, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_U, dev_b_U, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_D, dev_b_D, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_R_old, dev_b_R_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_L_old, dev_b_L_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_U_old, dev_b_U_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( b_D_old, dev_b_D_old, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( mat_font, dev_mat_font, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( mat_perm, dev_mat_perm, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( mat_epsilon, dev_mat_epsilon, tam_mat_real * tam_mat_real * sizeof(double),
cudaMemcpyDeviceToHost ) );
} |
0b82d45e8fcd220fcbadfce3727c6210dea3361f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Event.cuh"
//CUDA Kernel for Minidoublet creation
__global__ void createMiniDoubletsInGPU(SDL::MiniDoublet* mdCands, int n, SDL::MDAlgo algo)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
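// Grid-stride loop: each thread processes candidates i, i+stride, i+2*stride, ...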
for(int i = tid; i<n; i+= stride)
{
mdCands[i].runMiniDoubletAlgo(algo);
}
}
SDL::Event::Event() : logLevel_(SDL::Log_Nothing)
{
n_hits_by_layer_barrel_.fill(0);
n_hits_by_layer_endcap_.fill(0);
n_hits_by_layer_barrel_upper_.fill(0);
n_hits_by_layer_endcap_upper_.fill(0);
n_miniDoublet_candidates_by_layer_barrel_.fill(0);
n_miniDoublet_by_layer_barrel_.fill(0);
n_miniDoublet_candidates_by_layer_endcap_.fill(0);
n_miniDoublet_by_layer_endcap_.fill(0);
//counters
moduleMemoryCounter = 0;
hitMemoryCounter = 0;
hit2SEdgeMemoryCounter = 0;
mdMemoryCounter = 0;
}
SDL::Event::~Event()
{
hipFree(hitsInGPU);
hipFree(modulesInGPU);
hipFree(mdsInGPU);
hipFree(mdCandsGPU);
}
bool SDL::Event::hasModule(unsigned int detId)
{
if (modulesMapByDetId_.find(detId) == modulesMapByDetId_.end())
{
return false;
}
else
{
return true;
}
}
void SDL::Event::setLogLevel(SDL::LogLevel logLevel)
{
logLevel_ = logLevel;
}
void SDL::Event::initModulesInGPU()
{
const int MODULE_MAX=50000;
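// Managed (unified) memory keeps the same Module pointers valid on both host and device.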
hipMallocManaged(&modulesInGPU,MODULE_MAX * sizeof(SDL::Module));
}
SDL::Module* SDL::Event::getModule(unsigned int detId)
{
// using std::map::emplace
if(moduleMemoryCounter == 0)
{
initModulesInGPU();
}
std::pair<std::map<unsigned int, Module*>::iterator, bool> emplace_result = modulesMapByDetId_.emplace(detId,nullptr);
// Retrieve the module
auto& inserted_or_existing = (*(emplace_result.first)).second;
// If new was inserted, then insert to modulePtrs_ pointer list
if (emplace_result.second) // if true, new was inserted
{
//hipMallocManaged(&((*(emplace_result.first)).second),sizeof(SDL::Module));
(*(emplace_result.first)).second = &modulesInGPU[moduleMemoryCounter];
//*inserted_or_existing =SDL:: Module(detId);
modulesInGPU[moduleMemoryCounter] = SDL::Module(detId);
Module* module_ptr = inserted_or_existing;
// Add the module pointer to the list of modules
modulePtrs_.push_back(module_ptr);
// If the module is lower module then add to list of lower modules
if (module_ptr->isLower())
lowerModulePtrs_.push_back(module_ptr);
moduleMemoryCounter++;
}
return inserted_or_existing;
}
const std::vector<SDL::Module*> SDL::Event::getModulePtrs() const
{
return modulePtrs_;
}
const std::vector<SDL::Module*> SDL::Event::getLowerModulePtrs() const
{
return lowerModulePtrs_;
}
void SDL::Event::initHitsInGPU()
{
const int HIT_MAX = 1000000;
hipMallocManaged(&hitsInGPU,HIT_MAX * sizeof(SDL::Hit));
const int HIT_2S_MAX = 100000;
hipMallocManaged(&hits2sEdgeInGPU,HIT_2S_MAX * sizeof(SDL::Hit));
}
void SDL::Event::addHitToModule(SDL::Hit hit, unsigned int detId)
{
// Add to global list of hits, where it will hold the object's instance
// And get the module (if not exists, then create), and add the address to Module.hits_
//construct a hipMallocManaged object and send that in, so that we won't have issues in the GPU
if(hitMemoryCounter == 0)
{
initHitsInGPU();
}
hitsInGPU[hitMemoryCounter] = hit;
hitsInGPU[hitMemoryCounter].setModule(getModule(detId));
getModule(detId)->addHit(&hitsInGPU[hitMemoryCounter]);
hits_.push_back(hitsInGPU[hitMemoryCounter]);
// Count number of hits in the event
incrementNumberOfHits(*getModule(detId));
// If the hit is 2S in the endcap then the hit boundary needs to be set
if (getModule(detId)->subdet() == SDL::Module::Endcap and getModule(detId)->moduleType() == SDL::Module::TwoS)
{
hits2sEdgeInGPU[hit2SEdgeMemoryCounter] = GeometryUtil::stripHighEdgeHit(hitsInGPU[hitMemoryCounter]);
hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1] = GeometryUtil::stripLowEdgeHit(hitsInGPU[hitMemoryCounter]);
// hits_2s_edges_.push_back(GeometryUtil::stripHighEdgeHit(&hits_.back()));
// hits_.back().setHitHighEdgePtr(&(hits_2s_edges_.back()));
// hits_2s_edges_.push_back(GeometryUtil::stripLowEdgeHit(*hitForGPU));
// hits_.back().setHitLowEdgePtr(&(hits_2s_edges_.back()));
hits_2s_edges_.push_back(hits2sEdgeInGPU[hit2SEdgeMemoryCounter]);
hitsInGPU[hitMemoryCounter].setHitHighEdgePtr(&hits2sEdgeInGPU[hit2SEdgeMemoryCounter]);
hits_2s_edges_.push_back(hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1]);
hitsInGPU[hitMemoryCounter].setHitLowEdgePtr(&hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1]);
hit2SEdgeMemoryCounter+= 2;
}
hitMemoryCounter++;
}
void SDL::Event::createMiniDoublets(MDAlgo algo)
{
// Loop over lower modules
const int MAX_MD_CAND = 5000000;
hipMallocManaged(&mdCandsGPU,MAX_MD_CAND*sizeof(SDL::MiniDoublet));
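// mdCandsGPU is a staging buffer of candidates; miniDoubletGPUWrapper flushes it through the GPU kernel whenever it fills up.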
mdGPUCounter = 0;
for (auto& lowerModulePtr : getLowerModulePtrs())
{
// Create mini doublets
createMiniDoubletsFromLowerModule(lowerModulePtr->detId(), MAX_MD_CAND,algo);
}
if(mdGPUCounter < MAX_MD_CAND and mdGPUCounter > 0) //incomplete dudes from the final iteration
{
miniDoubletGPUWrapper(algo);
}
}
void SDL::Event::miniDoubletGPUWrapper(SDL::MDAlgo algo)
{
int nThreads = 256;
int nBlocks = (mdGPUCounter % nThreads == 0) ? mdGPUCounter/nThreads : mdGPUCounter/nThreads + 1;
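// Round the block count up so that every buffered candidate is assigned a thread.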
hipLaunchKernelGGL(( createMiniDoubletsInGPU) , dim3(nBlocks), dim3(nThreads), 0, 0, mdCandsGPU,mdGPUCounter,algo);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
{
std::cout<<"kernel launch failed with error : "<<hipGetErrorString(cudaerr)<<std::endl;
}
for(int i = 0; i < mdGPUCounter; i++)
{
auto mdCand = mdCandsGPU[i];
if(mdCand.passesMiniDoubletAlgo(algo))
{
// Count the number of md formed
SDL::Module& lowerModule = (Module&)((mdCand.lowerHitPtr())->getModule());
incrementNumberOfMiniDoublets(lowerModule);
if (lowerModule.subdet() == SDL::Module::Barrel)
{
addMiniDoubletToEvent(mdCand, lowerModule.detId());
}
else
{
addMiniDoubletToEvent(mdCand, lowerModule.detId());
}
}
}
mdGPUCounter = 0;
}
void SDL::Event::createMiniDoubletsFromLowerModule(unsigned int detId, int maxMDCands,SDL::MDAlgo algo)
{
// Get reference to the lower Module
Module& lowerModule = *getModule(detId);
// Get reference to the upper Module
Module& upperModule = *getModule(lowerModule.partnerDetId());
// Double nested loops
// Loop over lower module hits
//Number hardcoded from occupancy plots
for (auto& lowerHitPtr : lowerModule.getHitPtrs())
{
// Get reference to lower Hit
SDL::Hit& lowerHit = *lowerHitPtr;
// Loop over upper module hits
for (auto& upperHitPtr : upperModule.getHitPtrs())
{
// Get reference to upper Hit
SDL::Hit& upperHit = *upperHitPtr;
// Create a mini-doublet candidate
SDL::MiniDoublet mdCand(lowerHitPtr, upperHitPtr);
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setDrDz(tiltedGeometry.getDrDz(upperModule.detId()));
}
else
{
mdCand.setDrDz(tiltedGeometry.getDrDz(lowerModule.detId()));
}
if(lowerModule.subdet() == SDL::Module::Endcap)
{
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setLowerModuleSlope(SDL::endcapGeometry.getSlopeLower(upperModule.detId()));
}
else
{
mdCand.setLowerModuleSlope(SDL::endcapGeometry.getSlopeLower(lowerModule.detId()));
}
}
else
{
//FIXME: Might need some jugaad for nonexistent det Ids
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setLowerModuleSlope(SDL::tiltedGeometry.getSlope(upperModule.detId()));
}
else
{
mdCand.setLowerModuleSlope(SDL::tiltedGeometry.getSlope(lowerModule.detId()));
}
}
// memcpy(&mdCandsGPU[mdGPUCounter],&mdCand,sizeof(SDL::MiniDoublet));
mdCandsGPU[mdGPUCounter] = mdCand;
mdGPUCounter++;
if(mdGPUCounter == maxMDCands)
{
miniDoubletGPUWrapper(algo);
}
// Count the number of mdCand considered
incrementNumberOfMiniDoubletCandidates(lowerModule);
}
}
// Run mini-doublet algorithm on mdCand (mini-doublet candidate)
//after running MD algo'
}
// Multiplicity of mini-doublets
unsigned int SDL::Event::getNumberOfHits() { return hits_.size(); }
// Multiplicity of mini-doublets
unsigned int SDL::Event::getNumberOfMiniDoublets() { return miniDoublets_.size(); }
// Multiplicity of mini-doublet candidates considered in this event
unsigned int SDL::Event::getNumberOfMiniDoubletCandidates()
{
unsigned int n = 0;
for (unsigned int i = 0; i < 6; ++i) { n += n_miniDoublet_candidates_by_layer_barrel_[i]; }
for (unsigned int i = 0; i < 5; ++i) { n += n_miniDoublet_candidates_by_layer_endcap_[i]; }
return n;
}
// Multiplicity of mini-doublet formed in this event
unsigned int SDL::Event::getNumberOfMiniDoubletsByLayerBarrel(unsigned int ilayer) { return n_miniDoublet_by_layer_barrel_[ilayer]; }
// Multiplicity of mini-doublet formed in this event
unsigned int SDL::Event::getNumberOfMiniDoubletsByLayerEndcap(unsigned int ilayer) { return n_miniDoublet_by_layer_endcap_[ilayer]; }
// Multiplicity of hits in this event
void SDL::Event::incrementNumberOfHits(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
// Only count hits in lower module
if (not module.isLower())
{
if (isbarrel)
n_hits_by_layer_barrel_upper_[layer-1]++;
else
n_hits_by_layer_endcap_upper_[layer-1]++;
}
else
{
if (isbarrel)
n_hits_by_layer_barrel_[layer-1]++;
else
n_hits_by_layer_endcap_[layer-1]++;
}
}
// Multiplicity of mini-doublet candidates considered in this event
void SDL::Event::incrementNumberOfMiniDoubletCandidates(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
if (isbarrel)
n_miniDoublet_candidates_by_layer_barrel_[layer-1]++;
else
n_miniDoublet_candidates_by_layer_endcap_[layer-1]++;
}
// Multiplicity of mini-doublet formed in this event
void SDL::Event::incrementNumberOfMiniDoublets(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
if (isbarrel)
n_miniDoublet_by_layer_barrel_[layer-1]++;
else
n_miniDoublet_by_layer_endcap_[layer-1]++;
}
void SDL::Event::initMDsInGPU()
{
const int MD_MAX = 60000;
hipMallocManaged(&mdsInGPU,MD_MAX * sizeof(SDL::MiniDoublet));
}
void SDL::Event::addMiniDoubletToEvent(SDL::MiniDoublet md, unsigned int detId)
{
if(mdMemoryCounter == 0)
{
initMDsInGPU();
}
// Add to global list of mini doublets, where it will hold the object's instance
// And get the module (if not exists, then create), and add the address to Module.hits_
//construct a hipMallocManaged object and send that in, so that we won't have issues in the GPU
mdsInGPU[mdMemoryCounter] = md;
getModule(detId)->addMiniDoublet(&mdsInGPU[mdMemoryCounter]);
miniDoublets_.push_back(mdsInGPU[mdMemoryCounter]);
// And get the layer
mdMemoryCounter++;
}
namespace SDL
{
std::ostream& operator<<(std::ostream& out, const Event& event)
{
out << "" << std::endl;
out << "==============" << std::endl;
out << "Printing Event" << std::endl;
out << "==============" << std::endl;
out << "" << std::endl;
for (auto& modulePtr : event.modulePtrs_)
{
out << modulePtr;
}
return out;
}
std::ostream& operator<<(std::ostream& out, const Event* event)
{
out << *event;
return out;
}
}
| 0b82d45e8fcd220fcbadfce3727c6210dea3361f.cu | #include "Event.cuh"
//CUDA Kernel for Minidoublet creation
__global__ void createMiniDoubletsInGPU(SDL::MiniDoublet* mdCands, int n, SDL::MDAlgo algo)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
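// Grid-stride loop: each thread processes candidates i, i+stride, i+2*stride, ...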
for(int i = tid; i<n; i+= stride)
{
mdCands[i].runMiniDoubletAlgo(algo);
}
}
SDL::Event::Event() : logLevel_(SDL::Log_Nothing)
{
n_hits_by_layer_barrel_.fill(0);
n_hits_by_layer_endcap_.fill(0);
n_hits_by_layer_barrel_upper_.fill(0);
n_hits_by_layer_endcap_upper_.fill(0);
n_miniDoublet_candidates_by_layer_barrel_.fill(0);
n_miniDoublet_by_layer_barrel_.fill(0);
n_miniDoublet_candidates_by_layer_endcap_.fill(0);
n_miniDoublet_by_layer_endcap_.fill(0);
//counters
moduleMemoryCounter = 0;
hitMemoryCounter = 0;
hit2SEdgeMemoryCounter = 0;
mdMemoryCounter = 0;
}
SDL::Event::~Event()
{
cudaFree(hitsInGPU);
cudaFree(modulesInGPU);
cudaFree(mdsInGPU);
cudaFree(mdCandsGPU);
}
bool SDL::Event::hasModule(unsigned int detId)
{
if (modulesMapByDetId_.find(detId) == modulesMapByDetId_.end())
{
return false;
}
else
{
return true;
}
}
void SDL::Event::setLogLevel(SDL::LogLevel logLevel)
{
logLevel_ = logLevel;
}
void SDL::Event::initModulesInGPU()
{
const int MODULE_MAX=50000;
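// Managed (unified) memory keeps the same Module pointers valid on both host and device.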
cudaMallocManaged(&modulesInGPU,MODULE_MAX * sizeof(SDL::Module));
}
SDL::Module* SDL::Event::getModule(unsigned int detId)
{
// using std::map::emplace
if(moduleMemoryCounter == 0)
{
initModulesInGPU();
}
std::pair<std::map<unsigned int, Module*>::iterator, bool> emplace_result = modulesMapByDetId_.emplace(detId,nullptr);
// Retrieve the module
auto& inserted_or_existing = (*(emplace_result.first)).second;
// If new was inserted, then insert to modulePtrs_ pointer list
if (emplace_result.second) // if true, new was inserted
{
//cudaMallocManaged(&((*(emplace_result.first)).second),sizeof(SDL::Module));
(*(emplace_result.first)).second = &modulesInGPU[moduleMemoryCounter];
//*inserted_or_existing =SDL:: Module(detId);
modulesInGPU[moduleMemoryCounter] = SDL::Module(detId);
Module* module_ptr = inserted_or_existing;
// Add the module pointer to the list of modules
modulePtrs_.push_back(module_ptr);
// If the module is lower module then add to list of lower modules
if (module_ptr->isLower())
lowerModulePtrs_.push_back(module_ptr);
moduleMemoryCounter++;
}
return inserted_or_existing;
}
const std::vector<SDL::Module*> SDL::Event::getModulePtrs() const
{
return modulePtrs_;
}
const std::vector<SDL::Module*> SDL::Event::getLowerModulePtrs() const
{
return lowerModulePtrs_;
}
void SDL::Event::initHitsInGPU()
{
const int HIT_MAX = 1000000;
cudaMallocManaged(&hitsInGPU,HIT_MAX * sizeof(SDL::Hit));
const int HIT_2S_MAX = 100000;
cudaMallocManaged(&hits2sEdgeInGPU,HIT_2S_MAX * sizeof(SDL::Hit));
}
void SDL::Event::addHitToModule(SDL::Hit hit, unsigned int detId)
{
// Add to global list of hits, where it will hold the object's instance
// And get the module (if not exists, then create), and add the address to Module.hits_
//construct a cudaMallocManaged object and send that in, so that we won't have issues in the GPU
if(hitMemoryCounter == 0)
{
initHitsInGPU();
}
hitsInGPU[hitMemoryCounter] = hit;
hitsInGPU[hitMemoryCounter].setModule(getModule(detId));
getModule(detId)->addHit(&hitsInGPU[hitMemoryCounter]);
hits_.push_back(hitsInGPU[hitMemoryCounter]);
// Count number of hits in the event
incrementNumberOfHits(*getModule(detId));
// If the hit is 2S in the endcap then the hit boundary needs to be set
if (getModule(detId)->subdet() == SDL::Module::Endcap and getModule(detId)->moduleType() == SDL::Module::TwoS)
{
hits2sEdgeInGPU[hit2SEdgeMemoryCounter] = GeometryUtil::stripHighEdgeHit(hitsInGPU[hitMemoryCounter]);
hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1] = GeometryUtil::stripLowEdgeHit(hitsInGPU[hitMemoryCounter]);
// hits_2s_edges_.push_back(GeometryUtil::stripHighEdgeHit(&hits_.back()));
// hits_.back().setHitHighEdgePtr(&(hits_2s_edges_.back()));
// hits_2s_edges_.push_back(GeometryUtil::stripLowEdgeHit(*hitForGPU));
// hits_.back().setHitLowEdgePtr(&(hits_2s_edges_.back()));
hits_2s_edges_.push_back(hits2sEdgeInGPU[hit2SEdgeMemoryCounter]);
hitsInGPU[hitMemoryCounter].setHitHighEdgePtr(&hits2sEdgeInGPU[hit2SEdgeMemoryCounter]);
hits_2s_edges_.push_back(hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1]);
hitsInGPU[hitMemoryCounter].setHitLowEdgePtr(&hits2sEdgeInGPU[hit2SEdgeMemoryCounter+1]);
hit2SEdgeMemoryCounter+= 2;
}
hitMemoryCounter++;
}
void SDL::Event::createMiniDoublets(MDAlgo algo)
{
// Loop over lower modules
const int MAX_MD_CAND = 5000000;
cudaMallocManaged(&mdCandsGPU,MAX_MD_CAND*sizeof(SDL::MiniDoublet));
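// mdCandsGPU is a staging buffer of candidates; miniDoubletGPUWrapper flushes it through the GPU kernel whenever it fills up.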
mdGPUCounter = 0;
for (auto& lowerModulePtr : getLowerModulePtrs())
{
// Create mini doublets
createMiniDoubletsFromLowerModule(lowerModulePtr->detId(), MAX_MD_CAND,algo);
}
if(mdGPUCounter < MAX_MD_CAND and mdGPUCounter > 0) //incomplete dudes from the final iteration
{
miniDoubletGPUWrapper(algo);
}
}
void SDL::Event::miniDoubletGPUWrapper(SDL::MDAlgo algo)
{
int nThreads = 256;
int nBlocks = (mdGPUCounter % nThreads == 0) ? mdGPUCounter/nThreads : mdGPUCounter/nThreads + 1;
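// Round the block count up so that every buffered candidate is assigned a thread.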
createMiniDoubletsInGPU <<<nBlocks, nThreads>>> (mdCandsGPU,mdGPUCounter,algo);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
{
std::cout<<"kernel launch failed with error : "<<cudaGetErrorString(cudaerr)<<std::endl;
}
for(int i = 0; i < mdGPUCounter; i++)
{
auto mdCand = mdCandsGPU[i];
if(mdCand.passesMiniDoubletAlgo(algo))
{
// Count the number of md formed
SDL::Module& lowerModule = (Module&)((mdCand.lowerHitPtr())->getModule());
incrementNumberOfMiniDoublets(lowerModule);
if (lowerModule.subdet() == SDL::Module::Barrel)
{
addMiniDoubletToEvent(mdCand, lowerModule.detId());
}
else
{
addMiniDoubletToEvent(mdCand, lowerModule.detId());
}
}
}
mdGPUCounter = 0;
}
void SDL::Event::createMiniDoubletsFromLowerModule(unsigned int detId, int maxMDCands,SDL::MDAlgo algo)
{
// Get reference to the lower Module
Module& lowerModule = *getModule(detId);
// Get reference to the upper Module
Module& upperModule = *getModule(lowerModule.partnerDetId());
// Double nested loops
// Loop over lower module hits
//Number hardcoded from occupancy plots
for (auto& lowerHitPtr : lowerModule.getHitPtrs())
{
// Get reference to lower Hit
SDL::Hit& lowerHit = *lowerHitPtr;
// Loop over upper module hits
for (auto& upperHitPtr : upperModule.getHitPtrs())
{
// Get reference to upper Hit
SDL::Hit& upperHit = *upperHitPtr;
// Create a mini-doublet candidate
SDL::MiniDoublet mdCand(lowerHitPtr, upperHitPtr);
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setDrDz(tiltedGeometry.getDrDz(upperModule.detId()));
}
else
{
mdCand.setDrDz(tiltedGeometry.getDrDz(lowerModule.detId()));
}
if(lowerModule.subdet() == SDL::Module::Endcap)
{
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setLowerModuleSlope(SDL::endcapGeometry.getSlopeLower(upperModule.detId()));
}
else
{
mdCand.setLowerModuleSlope(SDL::endcapGeometry.getSlopeLower(lowerModule.detId()));
}
}
else
{
//FIXME: Might need some jugaad for nonexistent det Ids
if(lowerModule.moduleType() == SDL::Module::PS and upperModule.moduleLayerType() == SDL::Module::Strip)
{
mdCand.setLowerModuleSlope(SDL::tiltedGeometry.getSlope(upperModule.detId()));
}
else
{
mdCand.setLowerModuleSlope(SDL::tiltedGeometry.getSlope(lowerModule.detId()));
}
}
// memcpy(&mdCandsGPU[mdGPUCounter],&mdCand,sizeof(SDL::MiniDoublet));
mdCandsGPU[mdGPUCounter] = mdCand;
mdGPUCounter++;
if(mdGPUCounter == maxMDCands)
{
miniDoubletGPUWrapper(algo);
}
// Count the number of mdCand considered
incrementNumberOfMiniDoubletCandidates(lowerModule);
}
}
// Run mini-doublet algorithm on mdCand (mini-doublet candidate)
//after running MD algo'
}
// Multiplicity of mini-doublets
unsigned int SDL::Event::getNumberOfHits() { return hits_.size(); }
// Multiplicity of mini-doublets
unsigned int SDL::Event::getNumberOfMiniDoublets() { return miniDoublets_.size(); }
// Multiplicity of mini-doublet candidates considered in this event
unsigned int SDL::Event::getNumberOfMiniDoubletCandidates()
{
unsigned int n = 0;
for (unsigned int i = 0; i < 6; ++i) { n += n_miniDoublet_candidates_by_layer_barrel_[i]; }
for (unsigned int i = 0; i < 5; ++i) { n += n_miniDoublet_candidates_by_layer_endcap_[i]; }
return n;
}
// Multiplicity of mini-doublet formed in this event
unsigned int SDL::Event::getNumberOfMiniDoubletsByLayerBarrel(unsigned int ilayer) { return n_miniDoublet_by_layer_barrel_[ilayer]; }
// Multiplicity of mini-doublet formed in this event
unsigned int SDL::Event::getNumberOfMiniDoubletsByLayerEndcap(unsigned int ilayer) { return n_miniDoublet_by_layer_endcap_[ilayer]; }
// Multiplicity of hits in this event
void SDL::Event::incrementNumberOfHits(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
// Only count hits in lower module
if (not module.isLower())
{
if (isbarrel)
n_hits_by_layer_barrel_upper_[layer-1]++;
else
n_hits_by_layer_endcap_upper_[layer-1]++;
}
else
{
if (isbarrel)
n_hits_by_layer_barrel_[layer-1]++;
else
n_hits_by_layer_endcap_[layer-1]++;
}
}
// Multiplicity of mini-doublet candidates considered in this event
void SDL::Event::incrementNumberOfMiniDoubletCandidates(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
if (isbarrel)
n_miniDoublet_candidates_by_layer_barrel_[layer-1]++;
else
n_miniDoublet_candidates_by_layer_endcap_[layer-1]++;
}
// Multiplicity of mini-doublet formed in this event
void SDL::Event::incrementNumberOfMiniDoublets(SDL::Module& module)
{
int layer = module.layer();
int isbarrel = (module.subdet() == SDL::Module::Barrel);
if (isbarrel)
n_miniDoublet_by_layer_barrel_[layer-1]++;
else
n_miniDoublet_by_layer_endcap_[layer-1]++;
}
void SDL::Event::initMDsInGPU()
{
const int MD_MAX = 60000;
cudaMallocManaged(&mdsInGPU,MD_MAX * sizeof(SDL::MiniDoublet));
}
void SDL::Event::addMiniDoubletToEvent(SDL::MiniDoublet md, unsigned int detId)
{
if(mdMemoryCounter == 0)
{
initMDsInGPU();
}
// Add to global list of mini doublets, where it will hold the object's instance
// And get the module (if not exists, then create), and add the address to Module.hits_
//construct a cudaMallocManaged object and send that in, so that we won't have issues in the GPU
mdsInGPU[mdMemoryCounter] = md;
getModule(detId)->addMiniDoublet(&mdsInGPU[mdMemoryCounter]);
miniDoublets_.push_back(mdsInGPU[mdMemoryCounter]);
// And get the layer
mdMemoryCounter++;
}
namespace SDL
{
std::ostream& operator<<(std::ostream& out, const Event& event)
{
out << "" << std::endl;
out << "==============" << std::endl;
out << "Printing Event" << std::endl;
out << "==============" << std::endl;
out << "" << std::endl;
for (auto& modulePtr : event.modulePtrs_)
{
out << modulePtr;
}
return out;
}
std::ostream& operator<<(std::ostream& out, const Event* event)
{
out << *event;
return out;
}
}
|
39ce22b852a851284a7840fe3b812a1537166728.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "louvain_cuda.cuh"
#include "louvain_cuda_cpp_interface.hpp"
#include "GpuGraph.cuh"
#include <cstring>
#include <sstream>
#include <sys/time.h>
#include <time.h>
#include <hip/hip_cooperative_groups.h>
// #define PRINT_HYBRID
// #define PRINT_TIMEDS
// #define USE_HYBRID_CPU_GPU
#define L_THREADBLOCK_SIZE 512
#define M_THREADBLOCK_SIZE 256
// #define S_THREADBLOCK_SIZE 128
#define S_THREADBLOCK_SIZE 640
// #define MS_THREADBLOCK_SIZE 32
#define MS_THREADBLOCK_SIZE 512
#define ARRAY_REDUCE_THREADBLOCK_SIZE 32
// #define S_BLOCK_TILE ( ARRAY_REDUCE_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define S_BLOCK_TILE ( S_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define MS_BLOCK_TILE ( MS_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define FINDING_UNIQCOMM_BLOCK_TILE ( FINDING_UNIQCOMM_BLOCK_SIZE / PHY_WRP_SZ )
#define CUT_SIZE_NUM_EDGES2 9400000000
// #define CUT_SIZE_NUM_EDGES1 4096
// #define CUT_SIZE_NUM_EDGES12 4096
#define CUT_SIZE_NUM_EDGES1 4096
#define CUT_SIZE_NUM_EDGES12 4096
// #define CUT_SIZE_NUM_EDGES1 3310720
namespace cg = cooperative_groups;
using namespace CuVite;
double timer ( void )
{
struct timeval tv;
struct timezone tz;
gettimeofday ( &tv, &tz );
return (tv.tv_sec + 0.000001 * tv.tv_usec);
}
void updateLocalTarget_gpu (
GraphElem nv,
const CommunityVector &currComm,
CommunityVector &targetComm,
const GraphWeightVector &vDegree,
CommMap &remoteCupdate,
CommunityVector &temp_targetComm,
CommVector &localCupdate,
const GraphElem base, const GraphElem bound, int numIters
) {
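// Apply each vertex's community move: the four cases below depend on whether the
// current and target communities are locally owned (atomic updates on localCupdate)
// or remote (accumulated in remoteCupdate).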
// omp_set_num_threads(7);
#pragma omp parallel default(none), shared(nv, localCupdate, currComm, \
targetComm, vDegree, remoteCupdate, \
temp_targetComm, numIters)
#pragma omp for schedule(guided)
for (int i = 0; i < nv; i++) {
GraphElem localTarget, ModlocalTarget;
bool currCommIsLocal;
bool targetCommIsLocal;
GraphElem cc = currComm[i];
if(cc >= base && cc < bound) currCommIsLocal = true;
ModlocalTarget = temp_targetComm[i];
localTarget = ModlocalTarget;
/// is the Target Local?
if (ModlocalTarget >= base && ModlocalTarget < bound) targetCommIsLocal = true;
/// Modify current if >= bound using stored map
if (cc < base || cc >= bound) {
currCommIsLocal = false;
}
/// Modify ModlocalTarget if >= bound using stored map
/// Stored map is no more required
if (ModlocalTarget < base || ModlocalTarget >= bound) {
targetCommIsLocal = false;
}
// std::cout << "GPU i[" << i << "]; cc[" << cc << "]; localTarget["
// << localTarget << "]" << std::endl;
// current and target comm are local - atomic updates to vectors
if((localTarget != cc) && (localTarget != -1) &&
currCommIsLocal && targetCommIsLocal) {
#ifdef DEBUG_PRINTF
assert( base < localTarget < bound);
assert( base < cc < bound);
assert( cc - base < localCupdate.size());
assert( (localTarget - base) < (GraphElem)localCupdate.size());
#endif
#pragma omp atomic update
localCupdate[localTarget-base].degree += vDegree[i];
#pragma omp atomic update
localCupdate[localTarget-base].size++;
#pragma omp atomic update
localCupdate[cc-base].degree -= vDegree[i];
#pragma omp atomic update
localCupdate[cc-base].size--;
}
/// current is local, target is not - do atomic on local,
/// accumulate in Maps for remote
if ((localTarget != cc) && (localTarget != -1) &&
currCommIsLocal && !targetCommIsLocal) {
#pragma omp atomic update
localCupdate[cc-base].degree -= vDegree[i];
#pragma omp atomic update
localCupdate[cc-base].size--;
/// Search target in remoteCupdate
CommMap::iterator iter=remoteCupdate.find(localTarget);
#pragma omp atomic update
iter->second.degree += vDegree[i];
#pragma omp atomic update
iter->second.size++;
}
/// current is remote, target is local
/// accumulate for current, atomic on local
if ((localTarget != cc) && (localTarget != -1) &&
!currCommIsLocal && targetCommIsLocal) {
#pragma omp atomic update
localCupdate[localTarget-base].degree += vDegree[i];
#pragma omp atomic update
localCupdate[localTarget-base].size++;
/// Search in remoteCupdate
CommMap::iterator iter=remoteCupdate.find(cc);
#pragma omp atomic update
iter->second.degree -= vDegree[i];
#pragma omp atomic update
iter->second.size--;
}
/// Current and target are remote - accumulate for both
if ((localTarget != cc) && (localTarget != -1) &&
!currCommIsLocal && !targetCommIsLocal) {
// search current
CommMap::iterator iter=remoteCupdate.find(cc);
#pragma omp atomic update
iter->second.degree -= vDegree[i];
#pragma omp atomic update
iter->second.size--;
// search target
iter=remoteCupdate.find(localTarget);
#pragma omp atomic update
iter->second.degree += vDegree[i];
#pragma omp atomic update
iter->second.size++;
}
#ifdef DEBUG_PRINTF
assert(localTarget != -1);
#endif
targetComm[i] = localTarget;
// std::cout << "GPU i[" << i << "]; cc[" << cc << "]; localTarget["
// << localTarget << "]" << std::endl;
}
}
__device__ GraphWeight weight_reduce(
cg::thread_group g, GraphWeight *x, GraphWeight val)
{
int lane = g.thread_rank();
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
x[lane] = val; g.sync();
val += x[lane+i]; g.sync();
}
return val; // note: only thread 0 will return full sum
}
template <int tile_sz>
__device__ GraphWeight weight_reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, GraphWeight val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
#ifndef USE_32_BIT_GRAPH
template <int tile_sz>
__device__ GraphElem reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, GraphElem val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
#endif
template <int tile_sz>
__device__ int reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, int val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
template<int tile_sz>
__global__
void computeMaxIndex_large_thb(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
// auto tileIdx = thb_g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
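// Each warp-sized tile cooperates on its own work and indexes its slice of the shared arrays via tileIdx.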
auto tileIdx = thb_g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
cg::sync(thb_g);
for( int wii = 0; wii < thb_g.size(); wii++) {
// num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
num_cluster = shared_num_uniq_cl[wii];
// if(num_cluster >= (GraphElem)tile_sz)
if(num_cluster > CUT_SIZE_NUM_EDGES12) {
// if(tile.thread_rank() == 0) printf("num_cluster[%ld]; \n",
// num_cluster);
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
// GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
// my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
// for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
for(int k = 0; k < ((num_cluster-1)/thb_g.size()+1); k++)
{
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
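// Modularity gain of moving this vertex from its current community into tcomm.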
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
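// Tile-wide tree reduction: keep the candidate with the larger gain, breaking ties in favour of the smaller community index.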
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
thb_g.sync();
/// Perform reduction at threadblock level
for (int s =1; s < S_BLOCK_TILE; s *=2)
{
int indx = 2 * s * thb_g.thread_rank();
if(indx < S_BLOCK_TILE) {
// active = cg::coalesced_threads();
if(t_shared_maxIndex[indx+s] != -1) {
if((t_shared_maxGain[indx+s] > t_shared_maxGain[indx]) ||
(t_shared_maxIndex[indx] == -1) ||
(t_shared_maxGain[indx+s] == t_shared_maxGain[indx] &&
t_shared_maxGain[indx+s] != 0.0 && t_shared_maxIndex[indx] != -1 &&
t_shared_maxIndex[indx+s] < t_shared_maxIndex[indx])
) {
t_shared_maxGain[indx] = t_shared_maxGain[indx+s];
t_shared_maxIndex[indx] = t_shared_maxIndex[indx+s];
t_shared_maxSize[indx] = t_shared_maxSize[indx+s];
}
} else if(t_shared_maxIndex[indx+s] == -1 && t_shared_maxIndex[indx] == -1) {
t_shared_maxGain[indx] = 0.0;
t_shared_maxIndex[indx] = -1;
t_shared_maxSize[indx] = 0;
}
// active.sync();
}
}
thb_g.sync();
// if(tile.thread_rank() == 0)
if(thb_g.thread_rank() == 0)
{
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((t_shared_maxSize[0] == 1) &&
(currSize == 1) &&
(t_shared_maxIndex[0] > cc)) {
t_shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = t_shared_maxIndex[0];
}
thb_g.sync();
} // num_cluster loop
// tile.sync();
// __syncwarp();
thb_g.sync();
} // outer loop
}
template<int tile_sz>
__global__
void computeMaxIndex_large(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
// if(i == 42609) printf("vertex[%ld]; num_clusters[%ld]\n", i, uniq_clus_vec[i]);
/// Create cooperative groups
auto g = cg::this_thread_block();
// auto tileIdx = g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
for( int wii = 0; wii < tile.size(); wii++) {
num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
if(num_cluster > CUT_SIZE_NUM_EDGES12) {
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
{
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
if(tile.thread_rank() == 0) {
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((shared_maxSize[0] == 1) &&
(currSize == 1) &&
(shared_maxIndex[0] > cc)) {
shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = shared_maxIndex[0];
}
tile.sync();
__syncwarp();
}
tile.sync();
__syncwarp();
}
}
template<int tile_sz>
__global__
void computeMaxIndex(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
/// Create cooperative groups
auto g = cg::this_thread_block();
// auto tileIdx = g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
for( int wii = 0; wii < tile.size(); wii++) {
num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
if(num_cluster >= (GraphElem)tile_sz && num_cluster <= CUT_SIZE_NUM_EDGES12) {
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
{
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
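// Modularity gain of moving this vertex from its current community into tcomm.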
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
if(tile.thread_rank() == 0) {
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((shared_maxSize[0] == 1) &&
(currSize == 1) &&
(shared_maxIndex[0] > cc)) {
shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = shared_maxIndex[0];
}
tile.sync();
__syncwarp();
}
tile.sync();
__syncwarp();
}
/// Now implement for vertices with num_clusters < 4
// if(i >= nv) return;
curGain = 0.0;
GraphWeight maxGain = 0.0;
num_cluster = shared_num_uniq_cl[ii];
if(num_cluster < (GraphElem)tile_sz && num_cluster > 0 && i < nv) {
cc = currComm[i];
GraphElem maxIndex;
if(cc >= bound) {
maxIndex = ocurrComm[i];
} else {
maxIndex = cc;
}
localTarget[i] = -1; // cc;
GraphElem currSize = localCinfo_size[cc - base];
currDegree = localCinfo_degree[cc - base];
GraphElem maxSize = currSize;
t_my_counter[ii] = counter[i];
eix = t_my_counter[ii] - selfLoop[i];
vDegree = vDegree_vec[i];
ax = currDegree - vDegree;
GraphElem tcomm, otcomm;
for(GraphElem k = 0; k < num_cluster; k++) {
tcomm = clmap_comm[clmap_loc[i]+k];
if (tcomm != cc) {
ay = localCinfo_degree[tcomm - base];
size = localCinfo_size[tcomm - base];
eiy = clmap_weight[clmap_loc[i]+k];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
if(tcomm >= bound) {
otcomm = localCinfo_oComm[tcomm - bound];
} else {
otcomm = tcomm;
}
if((curGain > maxGain) ||
(curGain == maxGain && curGain != 0.0 && otcomm < maxIndex) ) {
maxGain = curGain;
maxIndex = otcomm;
maxSize = size;
}
}
}
if(cc >= bound) cc = ocurrComm[i];
if((maxSize == 1) && (currSize == 1) && (maxIndex > cc)) {
maxIndex = cc;
}
clusterWeight[i] += counter[i];
localTarget[i] = maxIndex;
} else if(num_cluster == 0 && i < nv) {
localTarget[i] = ocurrComm[i];
}
}
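/// Kernel: distGetMaxIndex_large_new
/// Handles the very-high-degree vertices (num_edges > CUT_SIZE_NUM_EDGES1) listed in
/// list_lt_cs2. Each thread block takes one such vertex at a time, histograms the
/// communities of its neighbours into per-block scratch arrays
/// (unique_comm_array_g / unique_weight_array_g), then compacts the non-empty bins
/// into the per-vertex cluster map (clmap_comm / clmap_weight), recording the number
/// of unique neighbouring communities in uniq_clus_vec and the weight to the
/// vertex's own community in counter.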
template<int tile_sz>
__global__
void distGetMaxIndex_large_new(
const int me, const int numIters,
const GraphElem nv, GraphElem nv_chunk_size,
const GraphElem size_lt_cs2, GraphElem* list_lt_cs2,
GraphElem max_comm_size,
GraphElem* unique_comm_array_g,
GraphWeight* unique_weight_array_g,
GraphElem* e0, // GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base // , const GraphElem bound
) {
// __shared__ GraphElem t_shared_begin_loc[FINDING_UNIQCOMM_BLOCK_TILE];
// __shared__ GraphElem t_shared_comm[FINDING_UNIQCOMM_BLOCK_SIZE];
// __shared__ GraphWeight t_shared_weight[FINDING_UNIQCOMM_BLOCK_SIZE];
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
auto tileIdx = thb_g.thread_rank()/tile_sz;
// GraphElem *shared_begin_loc = &t_shared_begin_loc[tileIdx];
// GraphElem *shared_comm = &t_shared_comm[tileIdx];
// GraphWeight* shared_weight = &t_shared_weight[tileIdx];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
int ii = threadIdx.x;
GraphElem* unique_comm_array =
&unique_comm_array_g[blockIdx.x*FINDING_UNIQCOMM_ARRAY_SIZE];
GraphWeight* unique_weight_array =
&unique_weight_array_g[blockIdx.x*FINDING_UNIQCOMM_ARRAY_SIZE];
for(GraphElem i_nv = 0; i_nv < nv_chunk_size; ++i_nv) {
GraphElem i_index = FINDING_UNIQCOMM_NUM_BLOCKS * i_nv + blockIdx.x;
if(i_index < size_lt_cs2) {
GraphElem i = list_lt_cs2[i_index];
if(i < nv) {
GraphElem cc = currComm[i];
GraphElem num_edges = List_numEdges[i];
if(num_edges > CUT_SIZE_NUM_EDGES1) {
GraphElem clmap_loc_i = clmap_loc[i];
// if(threadIdx.x == 0)
// printf("me[%d]; blockIdx.x[%d]; i[%ld]; base[%ld]\n", me, blockIdx.x, i, base);
//
for(GraphElem k = 0; k < ((max_comm_size*FINDING_UNIQCOMM_FACTOR-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < max_comm_size*FINDING_UNIQCOMM_FACTOR) {
unique_comm_array[thread_indx] = -1;
unique_weight_array[thread_indx] = 0.0;
}
}
thb_g.sync();
// if(thb_g.thread_rank() == 0)
// printf("me[%d]; i[%ld]; num_edges[%ld]\n", me, i, num_edges);
//
for(GraphElem k = 0; k < ((num_edges-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx = e0[i]+thread_indx;
GraphElem tail = graph_edgeList_tail[th_tail_indx];
// unique_comm_array[currComm[tail - base]] = 1;
// if(i == 169230)
// printf("me[%d]; thread_indx[%ld]; currComm[%ld]\n", me, thread_indx, currComm[tail - base]);
#if __cuda_arch__ >= 600
atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#else
#ifndef USE_32_BIT_GRAPH
my_func_atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
my_func_atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#else
atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#endif
#endif
// printf("new vertex[%ld]; comm_array[%ld]; weight_array[%e] \n",
// i, currComm[tail - base],
// unique_weight_array[currComm[tail - base]]);
}
}
thb_g.sync();
// Make unique cluster vectors of comm and weights
for(GraphElem k = 0; k < ((max_comm_size*FINDING_UNIQCOMM_FACTOR-1)/thb_g.size()+1); k++) {
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < max_comm_size*FINDING_UNIQCOMM_FACTOR) {
if(unique_comm_array[thread_indx] != -1) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
if(active.thread_rank() == 0) {
#if __cuda_arch__ >= 600
index_loc = atomicAdd(&uniq_clus_vec[i], active.size());
#else
#ifndef USE_32_BIT_GRAPH
index_loc = my_func_atomicAdd(&uniq_clus_vec[i], active.size());
#else
index_loc = atomicAdd(&uniq_clus_vec[i], active.size());
#endif
#endif
// printf("vertex using distGetMaxIndex_large_new[%ld]; num_edges[%ld]; active.size[%d]\n"
// , i, num_edges, active.size());
}
active.sync();
// printf("new vertex[%ld]; thread_index[%ld]; shared_begin_loc[0][%ld] \n",
// i, thread_indx, shared_begin_loc[0]);
if (cc == thread_indx+base) {
counter[i] = unique_weight_array[thread_indx];
// printf("vertex using distGetMaxIndex_large_new[%ld]; counter[%e]\n", i, counter[i]);
}
clmap_comm[clmap_loc_i+active.shfl(index_loc, 0)+active.thread_rank()] =
thread_indx+base;
clmap_weight[clmap_loc_i+active.shfl(index_loc, 0)+active.thread_rank()] =
unique_weight_array[thread_indx];
// clmap_comm[clmap_loc_i+my_loc] = thread_indx;
// clmap_weight[clmap_loc_i+my_loc] = unique_weight_array[thread_indx];
// printf("new vertex[%ld]; clmap_comm[%ld]; clmap_weight[%e] \n",
// i, clmap_comm[shared_begin_loc[0]-active.size()+active.thread_rank()],
// clmap_weight[shared_begin_loc[0]-active.size()+active.thread_rank()]);
}
}
thb_g.sync();
}
thb_g.sync();
// if(thb_g.thread_rank() == 0)
// printf("me[%d]; vertex[%ld]; cc[%ld]; uniq_cls_vec_size[%ld]\n",
// me, i, cc, uniq_clus_vec[i]);
} // (num_edges > CUT_SIZE_NUM_EDGES1) loop
} // if(i >= nv)
} // if(i_index >= size_lt_cs2)
} // chunk size loop
}
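/// Kernel: distGetMaxIndex_large (older whole-block variant)
/// Builds the per-vertex cluster map for high-degree vertices
/// (num_edges > CUT_SIZE_NUM_EDGES1) by copying neighbour communities and edge
/// weights, then de-duplicating them with a quadratic block-wide scan that
/// accumulates weights per community. The launch site below keeps this call under
/// "#if 0" and uses distGetMaxIndex_large_new instead.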
template<int tile_sz>
__global__
void distGetMaxIndex_large(
GraphElem nv,
GraphElem* e0, GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base, const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_weight[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_clmap_loc[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_red_shared_weight[S_BLOCK_TILE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
t_shared_clmap_loc[ii] = clmap_loc[i];
}
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
auto tileIdx = thb_g.thread_rank()/tile_sz;
GraphElem* shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight* shared_weight= &t_shared_weight[tileIdx * tile_sz];
GraphElem* shared_clmap_loc = &t_shared_clmap_loc[tileIdx * tile_sz];
GraphWeight *shared_red_weight = &t_red_shared_weight[tileIdx];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
// tile.sync();
thb_g.sync();
// GraphElem tcomm;
GraphElem Tail;
GraphElem num_edges;
/// Cater only to vertices with num of edges > CUT_SIZE_NUM_EDGES1;
/// the whole thread block works on one vertex at a time, one thread per edge
// for( int wii = 0; wii < tile.size(); wii++)
for( int wii = 0; wii < thb_g.size(); wii++) {
num_edges = t_shared_num_edges[wii];
if(num_edges > CUT_SIZE_NUM_EDGES1) {
// for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++)
for(int k = 0; k < ((num_edges-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx =
e0[blockIdx.x*blockDim.x+wii]+thread_indx;
Tail = graph_edgeList_tail[th_tail_indx];
clmap_comm[t_shared_clmap_loc[wii]+thread_indx] =
currComm[Tail - base];
clmap_weight[t_shared_clmap_loc[wii]+thread_indx] =
graph_edgeList_weight[th_tail_indx];
}
}
}
}
thb_g.sync();
/// Now find out unique clusters and accumulate weights
GraphElem cc;
// for( int wii = 0; wii < tile_sz; wii++)
for( int wii = 0; wii < thb_g.size(); wii++) {
num_edges = t_shared_num_edges[wii];
// if (blockIdx.x*blockDim.x+tileIdx*tile_sz+wii < nv)
if (blockIdx.x*blockDim.x+wii < nv)
// cc = currComm[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii];
cc = currComm[blockIdx.x*blockDim.x+wii];
GraphWeight tile_sum_Weight;
if(num_edges > CUT_SIZE_NUM_EDGES1) {
GraphElem store_indx = -1;
// if (tile.thread_rank() == 0)
// uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] = 0;
if (thb_g.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+wii] = 0;
for(GraphElem ko = 0; ko < num_edges; ko++) {
tile_sum_Weight = 0.0;
GraphElem comm_pos = clmap_comm[t_shared_clmap_loc[wii]+ko];
if(comm_pos != -1) {
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0) {
store_indx += 1;
clmap_comm[t_shared_clmap_loc[wii]+store_indx] = comm_pos;
}
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
clmap_weight[t_shared_clmap_loc[wii]+store_indx] =
clmap_weight[t_shared_clmap_loc[wii]+ko];
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+wii] += 1;
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
shared_red_weight[0] = 0.0;
shared_weight[thb_g.thread_rank()] = 0.0;
// for(GraphElem k = 0; k < ((num_edges-1)/tile.size()+1); k++)
for(GraphElem k = 0; k < ((num_edges-ko-1)/thb_g.size()+1); k++) {
GraphElem thread_indx = ko + 1 + k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
if (comm_pos == clmap_comm[t_shared_clmap_loc[wii]+thread_indx]) {
shared_weight[thb_g.thread_rank()] +=
clmap_weight[t_shared_clmap_loc[wii]+thread_indx];
clmap_comm[t_shared_clmap_loc[wii]+thread_indx] = -1;
}
}
}
// tile.sync();
thb_g.sync();
/// Perform reduction to accumulate weights
tile_sum_Weight = weight_reduce(thb_g, t_shared_weight,
t_shared_weight[thb_g.thread_rank()]);
// shared_weight[tile.thread_rank()]);
if(thb_g.thread_rank() ==0) shared_red_weight[0] += tile_sum_Weight;
// thb_g.sync();
// /// Perform reduction at threadblock level
// for (int s =1; s < S_BLOCK_TILE; s *=2)
// {
// int indx = 2 * s * thb_g.thread_rank();
// if(indx < S_BLOCK_TILE) {
// shared_red_weight[indx] = shared_red_weight[indx+s];
// }
// }
// thb_g.sync();
/// Add weights to cluster map
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0) {
// clmap_weight[t_shared_clmap_loc[wii]+store_indx] += tile_sum_Weight;
clmap_weight[t_shared_clmap_loc[wii]+store_indx] +=
shared_red_weight[0];
// printf("vertex[%ld]; weight[%ld][%e]; ko[%ld] comm_pos[%ld]\n",
// blockIdx.x*blockDim.x+wii, t_shared_clmap_loc[wii]+store_indx,
// clmap_weight[t_shared_clmap_loc[wii]+store_indx], ko, comm_pos);
// if(comm_pos == cc) counter[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] =
// clmap_weight[shared_clmap_loc[wii]+store_indx];
if(comm_pos == cc) counter[blockIdx.x*blockDim.x+wii] =
clmap_weight[t_shared_clmap_loc[wii]+store_indx];
}
}
// tile.sync();
thb_g.sync();
}
}
} // end of old implementation
}
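/// Kernel: distGetMaxIndex
/// Builds the per-vertex cluster map for low/medium-degree vertices
/// (num_edges <= CUT_SIZE_NUM_EDGES1). Vertices with at least tile_sz edges are
/// processed one per tile (one thread per edge); vertices with fewer edges are
/// handled serially by a single thread. The de-duplicated community list and
/// accumulated weights go to clmap_comm / clmap_weight, with the unique count in
/// uniq_clus_vec and the weight to the vertex's own community in counter.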
template<int tile_sz>
__global__
void distGetMaxIndex(
GraphElem nv,
GraphElem* e0, // GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base // , const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_weight[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_clmap_loc[S_THREADBLOCK_SIZE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
t_shared_clmap_loc[ii] = clmap_loc[i];
}
/// Create cooperative groups
auto g = cg::this_thread_block();
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem* shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight* shared_weight= &t_shared_weight[tileIdx * tile_sz];
GraphElem* shared_clmap_loc = &t_shared_clmap_loc[tileIdx * tile_sz];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
tile.sync();
// GraphElem tcomm;
GraphElem Tail;
GraphElem num_edges;
/// Cater only to vertices with tile_sz <= num of edges <= CUT_SIZE_NUM_EDGES1;
/// each tile works on one vertex, one thread per edge
for( int wii = 0; wii < tile_sz; wii++) {
num_edges = shared_num_edges[wii];
if(num_edges >= (GraphElem)tile_sz && num_edges <= CUT_SIZE_NUM_EDGES1) {
for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++) {
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx =
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx;
Tail = graph_edgeList_tail[th_tail_indx];
clmap_comm[shared_clmap_loc[wii]+thread_indx] =
currComm[Tail - base];
clmap_weight[shared_clmap_loc[wii]+thread_indx] =
graph_edgeList_weight[th_tail_indx];
}
}
}
}
tile.sync();
/// Cater only to vertices with num of edges < tile_sz;
/// each thread works on one vertex
num_edges = shared_num_edges[tile.thread_rank()];
if(num_edges < (GraphElem)tile_sz && num_edges > 0 && i < nv) {
GraphElem edge_low = e0[i];
for (GraphElem j = 0; j < num_edges; j++) {
Tail = graph_edgeList_tail[edge_low+j];
clmap_comm[shared_clmap_loc[tile.thread_rank()]+j] = currComm[Tail - base];
clmap_weight[shared_clmap_loc[tile.thread_rank()]+j] = graph_edgeList_weight[edge_low+j];
}
}
/// Now find out unique clusters and accumulate weights
GraphElem cc;
for( int wii = 0; wii < tile_sz; wii++) {
num_edges = shared_num_edges[wii];
if (blockIdx.x*blockDim.x+tileIdx*tile_sz+wii < nv)
cc = currComm[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii];
GraphWeight tile_sum_Weight;
if(num_edges >= (GraphElem)tile_sz && num_edges <= CUT_SIZE_NUM_EDGES1) {
GraphElem store_indx = -1;
if (tile.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] = 0;
for(GraphElem ko = 0; ko < num_edges; ko++) {
tile_sum_Weight = 0.0;
GraphElem comm_pos = clmap_comm[shared_clmap_loc[wii]+ko];
if(comm_pos != -1) {
if (tile.thread_rank() == 0) {
store_indx += 1;
clmap_comm[shared_clmap_loc[wii]+store_indx] = comm_pos;
}
if (tile.thread_rank() == 0)
clmap_weight[shared_clmap_loc[wii]+store_indx] =
clmap_weight[shared_clmap_loc[wii]+ko];
if (tile.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] += 1;
for(GraphElem k = 0; k < ((num_edges-ko-1)/tile.size()+1); k++) {
GraphElem thread_indx = ko + 1 + k*tile.size() + tile.thread_rank();
shared_weight[tile.thread_rank()] = 0.0;
if(thread_indx < num_edges) {
if (comm_pos == clmap_comm[shared_clmap_loc[wii]+thread_indx]) {
shared_weight[tile.thread_rank()] =
clmap_weight[shared_clmap_loc[wii]+thread_indx];
clmap_comm[shared_clmap_loc[wii]+thread_indx] = -1;
}
}
tile.sync();
/// Perform reduction to accumulate weights
tile_sum_Weight += weight_reduce_sum_tile_shfl<tile_sz>
(tile, shared_weight[tile.thread_rank()]);
}
/// Add weights to cluster map
if (tile.thread_rank() == 0) {
clmap_weight[shared_clmap_loc[wii]+store_indx] += tile_sum_Weight;
if(comm_pos == cc) counter[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] =
clmap_weight[shared_clmap_loc[wii]+store_indx];
}
}
tile.sync();
}
}
}
/// repeat for vertices with num_edges < tile_sz
// if( i < nv) return;
num_edges = shared_num_edges[tile.thread_rank()];
if(num_edges < (GraphElem)tile_sz && num_edges > 0 && i < nv) {
cc = currComm[i];
uniq_clus_vec[i] = 0;
GraphElem store_indx = -1;
int counter_switch = 1;
for(GraphElem ko = 0; ko < num_edges; ko++) {
GraphElem comm_pos = clmap_comm[shared_clmap_loc[tile.thread_rank()]+ko];
// GraphElem comm_count = 1;
if(comm_pos != -1) {
uniq_clus_vec[i] += 1;
store_indx += 1;
clmap_comm[shared_clmap_loc[tile.thread_rank()]+store_indx] = comm_pos;
clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx] =
clmap_weight[shared_clmap_loc[tile.thread_rank()]+ko];
for(GraphElem k = ko+1; k < num_edges; k++) {
if (comm_pos == clmap_comm[shared_clmap_loc[tile.thread_rank()]+k]) {
clmap_comm[shared_clmap_loc[tile.thread_rank()]+k] = -1;
clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx] +=
clmap_weight[shared_clmap_loc[tile.thread_rank()]+k];
}
}
if (comm_pos == cc && counter_switch == 1) {
counter_switch = 0;
counter[i] += clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx];
}
}
}
}
}
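/// Kernel: distBuildLocalMapCounter
/// Computes the self-loop weight of every local vertex: high-degree vertices are
/// reduced cooperatively by a tile (shuffle reduction followed by an atomic add
/// into selfLoopVec), while low-degree vertices are summed by a single thread.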
template<int tile_sz>
__global__
void distBuildLocalMapCounter(
/// Accumulate selfLoopVec for all vertices
GraphElem nv,
GraphElem* e0, GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* List_numEdges, GraphWeight* selfLoopVec,
const GraphElem base // , const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_block_weight[S_THREADBLOCK_SIZE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
}
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto g = cg::this_thread_block();
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem *shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight *shared_block_weight = &t_shared_block_weight[tileIdx * tile_sz];
tile.sync();
/// Cater only to vertices with num of edges >= tile_sz;
/// each tile works on one vertex at a time.
/// This implementation uses cooperative groups and
/// a large thread block size (e.g. 128) to increase occupancy.
for( int wii = 0; wii < tile_sz; wii++) {
GraphElem num_edges = shared_num_edges[wii];
shared_block_weight[tile.thread_rank()] = 0.0;
if(num_edges >= (GraphElem)tile_sz) {
for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++) {
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
if(thread_indx < num_edges) {
if(graph_edgeList_tail[
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx] ==
blockIdx.x * blockDim.x + tileIdx*tile_sz+wii + base)
shared_block_weight[tile.thread_rank()] +=
graph_edgeList_weight[
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx];
}
}
tile.sync();
GraphWeight tile_sum_Weight = weight_reduce_sum_tile_shfl<tile_sz>
(tile, shared_block_weight[tile.thread_rank()]);
if (tile.thread_rank() == 0)
#if __cuda_arch__ >= 600
atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#else
#ifndef USE_32_BIT_GRAPH
my_func_atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#else
atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#endif
#endif
}
}
/// Cater to vertices with num of edges < tile_sz; each thread handles one vertex
if(i >= nv) return;
GraphWeight selfLoop = 0;
if(shared_num_edges[tile.thread_rank()] < (GraphElem)tile_sz) {
for (GraphElem j = e0[i]; j < e1[i]; j++) {
if(graph_edgeList_tail[j] == i + base)
selfLoop += graph_edgeList_weight[j];
}
selfLoopVec[i] = selfLoop;
}
}
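/// Kernel: count_size_clmap
/// For every vertex, records its edge count (List_numEdges), assigns its offset into
/// the shared cluster-map buffer (clmap_loc) via a tile-level prefix over edge counts
/// plus an atomic fetch-add on size_clmap, and bins vertices into three degree
/// classes: <= tile_sz, (tile_sz, CUT_SIZE_NUM_EDGES1], and > CUT_SIZE_NUM_EDGES1
/// (list_lt_ts / list_lt_cs1 / list_lt_cs2), so later kernels can pick the
/// appropriate processing strategy.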
template<int tile_sz>
__global__
void count_size_clmap (GraphElem nv, GraphElem* NumClusters,
GraphElem* clmap_loc, GraphElem* size_clmap,
GraphElem* size_lt_ts, GraphElem* list_lt_ts,
GraphElem* size_lt_cs1, GraphElem* list_lt_cs1,
GraphElem* size_lt_cs2, GraphElem* list_lt_cs2,
GraphElem* e0, GraphElem* e1, GraphElem* List_numEdges) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
#ifndef USE_32_BIT_GRAPH
__shared__ long shared_mem_size[MS_THREADBLOCK_SIZE];
__shared__ long shared_begin_loc[MS_BLOCK_TILE];
#else
__shared__ int shared_mem_size[MS_THREADBLOCK_SIZE];
__shared__ int shared_begin_loc[MS_BLOCK_TILE];
#endif
shared_mem_size[ii] = 0;
GraphElem numEdges = 0;
#ifdef DEBUG_CUVITE
int my_mem_block = 0;
#endif
// GraphElem numEdges;
if(i < nv) {
numEdges = e1[i] - e0[i];
List_numEdges[i] = numEdges;
// if(numEdges >= 90 && numEdges < 96)
// printf("vertex[%ld]; numEdges[%ld]\n",i, numEdges);
#ifdef DEBUG_CUVITE
if(numEdges > 0) my_mem_block = 1;
#endif
// printf("vertex[%ld]; num_edges[%d] \n", i, numEdges);
// shared_mem_size[ii] = numEdges * sizeof(GraphElem);
shared_mem_size[ii] = (GraphElem) numEdges;
// printf("vertex[%ld]; shared_mem_size[%d] \n", i, shared_mem_size[ii]);
}
auto g = cg::this_thread_block();
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem *t_shared_mem_size = &shared_mem_size[tileIdx * tile_sz];
GraphElem *t_shared_begin_loc = &shared_begin_loc[tileIdx];
tile.sync();
/// Mem loc inside the block
GraphElem my_mem_loc = 0;
for(int s = 0; s < tile.size(); s++) {
if(tile.thread_rank() > s) my_mem_loc += t_shared_mem_size[s];
}
tile.sync();
#if 1
/// Accumulate number of clusters and mem requirement
for(int s = 1; s < tile.size(); s *= 2) {
int indx = 2 * s * tile.thread_rank();
if(indx < tile.size()) {
t_shared_mem_size[indx] += t_shared_mem_size[indx+s];
}
tile.sync();
}
#else
GraphElem tile_sum_size = reduce_sum_tile_shfl<tile_sz>
(tile, t_shared_mem_size[tile.thread_rank()]);
#endif
#ifdef DEBUG_CUVITE
int tile_sum_mem = reduce_sum_tile_shfl<tile_sz>(tile, my_mem_block);
#endif
if(tile.thread_rank() == 0) {
// printf("shared_mem_block[%d]; shared_mem_size[%ld]; blockIdx.x[%d] \n",
// tile_sum_mem, shared_mem_size[0], blockIdx.x);
// printf("shared_mem_block[%d]; shared_mem_size[%ld]; blockIdx.x[%d] \n",
// tile_sum_mem, tile_sum_size, blockIdx.x);
#if __cuda_arch__ >= 600
#ifdef DEBUG_CUVITE
atomicAdd(&NumClusters[0], tile_sum_mem);
#endif
t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
// t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], tile_sum_size);
#else
#ifdef DEBUG_CUVITE
my_func_atomicAdd(&NumClusters[0], tile_sum_mem);
#endif
#ifndef USE_32_BIT_GRAPH
t_shared_begin_loc[0] = my_func_atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
#else
t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
#endif
#endif
}
tile.sync();
#ifdef DEBUG_CUVITE
// if(i == 0) printf("Number of edges in a block[%ld]\n", NumClusters[0]);
#endif
if(i >= nv) return;
if(numEdges > 0) {
clmap_loc[i] = t_shared_begin_loc[0] + my_mem_loc;
// printf("vertex[%ld]; shared_begin_loc0[%d]; numEdges[%ld]; my_mem_loc[%ld]; clmap_loc[%ld] \n",
// i, t_shared_begin_loc[0], numEdges, my_mem_loc, clmap_loc[i]);
} else {
clmap_loc[i] = -1;
}
/// Group vertices based on degree
tile.sync();
if(numEdges <= (GraphElem)tile_sz) {
cg::coalesced_group active = cg::coalesced_threads();
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = atomicAdd(&size_lt_ts[0], active.size());
#else
#ifndef USE_32_BIT_GRAPH
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = my_func_atomicAdd(&size_lt_ts[0], active.size());
#else
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = atomicAdd(&size_lt_ts[0], active.size());
#endif
#endif
active.sync();
list_lt_ts[t_shared_begin_loc[0]+active.thread_rank()] = i;
}
tile.sync();
if(numEdges > (GraphElem)tile_sz && numEdges <= CUT_SIZE_NUM_EDGES1) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs1[0], active.size());
#else
if(active.thread_rank() == 0)
#ifndef USE_32_BIT_GRAPH
index_loc = my_func_atomicAdd(&size_lt_cs1[0], active.size());
#else
index_loc = atomicAdd(&size_lt_cs1[0], active.size());
#endif
#endif
// active.sync();
list_lt_cs1[active.shfl(index_loc, 0)+active.thread_rank()] = i;
}
tile.sync();
if(numEdges > CUT_SIZE_NUM_EDGES1) {
// printf("vertex[%ld]; lt_cs2_num_edges[%ld] \n", i, numEdges);
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs2[0], active.size());
#else
#ifndef USE_32_BIT_GRAPH
if(active.thread_rank() == 0)
index_loc = my_func_atomicAdd(&size_lt_cs2[0], active.size());
#else
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs2[0], active.size());
#endif
#endif
// active.sync();
list_lt_cs2[active.shfl(index_loc, 0)+active.thread_rank()] = i;
}
}
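/// Kernel: gpu_distExecuteLouvainIteration
/// Unpacks the CSR-style edgeListIndexes into per-vertex low/high edge offsets
/// (GraphEdge_low / GraphEdge_high) consumed by the kernels above.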
__global__
void gpu_distExecuteLouvainIteration(
const GraphElem nv,
GraphElem* graph_edgeListIndexes,
GraphElem* GraphEdge_low, GraphElem* GraphEdge_high,
int me, const GraphElem base, const GraphElem bound
) {
// int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= nv) return;
GraphElem e0, e1;
e0 = graph_edgeListIndexes[i];
e1 = graph_edgeListIndexes[i+1];
/// Store variables to global memory
GraphEdge_low[i] = e0;
GraphEdge_high[i] = e1;
}
template<class T>
__global__ void print_device_vector(T *given_vec, GraphElem size_vec)
{
GraphElem ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= size_vec) return;
printf("i[%ld]; VEC_VALUE[%f]\n", i, given_vec[i]);
}
__global__ void print_device_vector2(GraphWeight* given_vec,
GraphElem size_vec)
{
// GraphElem ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= size_vec) return;
printf("i[%ld]; VEC_VALUE[%f]\n", i, given_vec[i]);
}
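/// Bind each MPI rank to a GPU. The communicator is split by shared-memory node so
/// that the node-local rank, taken modulo the number of visible devices, selects the
/// device; the alternative branch maps the global rank modulo the device count.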
void set_gpuDevices(int *me)
{
int num_gpuDevices;
hipGetDeviceCount(&num_gpuDevices);
#if 1
/// split MPI comm to get local node rank
/// hipSetDevice to local node rank
MPI_Comm loc_comm;
MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, *me,
MPI_INFO_NULL, &loc_comm);
int node_rank = -1;
MPI_Comm_rank(loc_comm,&node_rank);
// std::cout << "me:" << *me << "; node rank:" << node_rank << std::endl;
hipError_t cudaStat;
cudaStat = hipSetDevice(node_rank);
for (int dev_id = 0; dev_id < num_gpuDevices; dev_id++) {
if( node_rank%num_gpuDevices == dev_id) {
cudaStat = hipSetDevice(dev_id);
// std::cout << "me[" << *me << "]; node rank[" << node_rank
// << "]; dev_id[" << dev_id << "]" << std::endl;
}
}
if(cudaStat != hipSuccess)
printf("Process %d; ERROR DEVICE FAILED\n", *me);
MPI_Comm_free(&loc_comm);
#else
/// hipSetDevice to MPI rank
hipError_t cudaStat;
for (int dev_id = 0; dev_id < num_gpuDevices; dev_id++) {
if( *me%num_gpuDevices == dev_id) cudaStat = hipSetDevice(dev_id);
}
// cudaStat = hipSetDevice(2);
if(cudaStat != hipSuccess)
printf("Process %d; ERROR DEVICE FAILED\n", *me);
#endif
}
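/// Host driver for one Louvain iteration on the GPU (optionally hybrid CPU+GPU).
/// It remaps remote community ids and remote edge tails into a contiguous index
/// range starting at `bound` so device arrays can be indexed densely, copies the
/// flattened community and graph data to the device, launches the map-building and
/// max-gain kernels, and copies the chosen target communities and cluster weights
/// back to the host before the targets are merged via updateLocalTarget_gpu.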
int gpu_for_louvain_iteration(
const GraphElem nv, const DistGraph &dg,
CommunityVector &currComm,
CommunityVector &targetComm,
GraphWeightVector &vDegree,
CommVector &localCinfo,
CommVector &localCupdate,
VertexCommMap &remoteComm,
const CommMap &remoteCinfo,
CommMap &remoteCupdate,
const double constantForSecondTerm,
GraphWeightVector &clusterWeight,
int me, int numIters, GpuGraph &gpu_graph)
{
if(nv <= 0) return 1;
const GraphElem base = dg.getBase(me), bound = dg.getBound(me);
#ifdef USE_HYBRID_CPU_GPU // Run hybrid CPU-GPU code
// create a temporary target buffer
std::vector<GraphElem> temp_targetComm_cpu = targetComm;
std::vector<GraphElem> temp_targetComm_gpu = targetComm;
static GraphElem num_vertex_cpu, num_vertex_gpu;
static double time_cpu, time_gpu;
if(numIters == 1)
{
time_cpu = 1.e0;
time_gpu = 1.e0;
}
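/// Adaptive CPU/GPU work split: shift roughly a third of the relative timing
/// imbalance measured in the previous iteration (in vertices) towards the faster side.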
if(time_cpu >= time_gpu){
num_vertex_cpu = num_vertex_cpu -
nv * (time_cpu - time_gpu) / (time_cpu + time_gpu) / 3;
}
if(time_cpu < time_gpu){
num_vertex_cpu = num_vertex_cpu +
nv * (time_gpu - time_cpu) / (time_cpu + time_gpu) / 3;
}
if(num_vertex_cpu <= 0) num_vertex_cpu = nv * 1/80;
if(num_vertex_cpu > nv ) num_vertex_cpu = nv * 9/10;
// if(numIters == 1) num_vertex_cpu = nv * 1/20;
if(numIters == 1) num_vertex_cpu = nv * 1/3;
// num_vertex_cpu = 0;
num_vertex_gpu = nv - num_vertex_cpu;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; nv: " << nv << "; num_vertex_gpu: " <<
num_vertex_gpu << "; num_vertex_cpu: " << num_vertex_cpu << std::endl;
#endif
int num_avail_threads = omp_get_num_threads();
const int maxNumThreads = omp_get_max_threads();
omp_set_num_threads(2);
omp_set_nested(1);
double t0 = timer();
#pragma omp parallel sections
{
#pragma omp section
{ //call CPU function
omp_set_num_threads(8);
#pragma omp parallel default(none), shared(clusterWeight, localCupdate, currComm, targetComm, \
vDegree, localCinfo, remoteCinfo, remoteComm, dg, remoteCupdate, me, \
temp_targetComm_cpu, num_vertex_gpu), \
firstprivate(constantForSecondTerm)
{
// distCleanCWandCU(nv, clusterWeight, localCupdate);
#ifdef OMP_SCHEDULE_RUNTIME
#pragma omp for schedule(runtime)
#else
#pragma omp for schedule(guided)
#endif
for (GraphElem i = num_vertex_gpu; i < nv; i++) {
distExecuteLouvainIteration_hybrid(i, dg, currComm, targetComm, vDegree, localCinfo,
localCupdate, remoteComm, remoteCinfo, remoteCupdate,
constantForSecondTerm, clusterWeight, me, temp_targetComm_cpu);
}
}
double t1 = timer();
time_cpu = t1 - t0;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; Time CPU: " << time_cpu << std::endl;
#endif
} // close cpu section
#pragma omp section
{ /// call GPU function
omp_set_num_threads(4);
if (num_vertex_gpu > 0) {
/// size equal to nv X GraphElem
size_t mem_size_GraphElem_nv = sizeof(GraphElem) * nv;
/// size equal to nv X GraphWeight
size_t mem_size_GraphWeight_nv = sizeof(GraphWeight) * nv;
/// All following have size = nv
GraphElem size_localCinfo = localCinfo.size();
GraphElem* temp_ModlocalCinfo_size = gpu_graph.getPinned_ModlocalCinfo_size();
GraphWeight* temp_ModlocalCinfo_degree = gpu_graph.getPinned_ModlocalCinfo_degree();
#pragma omp parallel default(none), \
shared(localCinfo, temp_ModlocalCinfo_size, temp_ModlocalCinfo_degree)
#pragma omp for schedule(guided)
for(int ii=0; ii<localCinfo.size(); ii++) {
temp_ModlocalCinfo_size[ii] = localCinfo[ii].size;
temp_ModlocalCinfo_degree[ii] = localCinfo[ii].degree;
}
/// Remote Community Info
/// First get the keys of remoteCinfo map
std::vector<GraphElem> temp_remoteCinfo_key = extract_keys_CommMap(remoteCinfo);
GraphElem size_remoteCinfo = remoteCinfo.size();
/// split RemoteCinfo into vectors for different struct elements
std::vector<GraphElem>temp_remoteCinfo_size =
extract_value_CommMap_size(remoteCinfo);
std::vector<GraphWeight>temp_remoteCinfo_degree =
extract_value_CommMap_degree(remoteCinfo);
/// now modify currComm to include remoteComm
GraphElem* temp_ModlocalCinfo_oComm = gpu_graph.getPinned_ModlocalCinfo_oComm();
ClusterLocalMap localCinfo_to_remoteCinfo_map;
ClusterLocalMap::const_iterator storedAlready;
GraphElem temp_counter_01 = 0;
std::vector<GraphElem> ModcurrComm = currComm;
for(int ii=0; ii<temp_remoteCinfo_key.size(); ii++) {
GraphElem temp_Comm = temp_remoteCinfo_key[ii];
if(temp_Comm < base || temp_Comm >= bound)
{
storedAlready = localCinfo_to_remoteCinfo_map.find(temp_Comm);
if(storedAlready == localCinfo_to_remoteCinfo_map.end()) {
localCinfo_to_remoteCinfo_map.insert(std::make_pair(
temp_Comm, (temp_counter_01+bound)));
temp_ModlocalCinfo_size[size_localCinfo+temp_counter_01] = temp_remoteCinfo_size[ii];
temp_ModlocalCinfo_degree[size_localCinfo+temp_counter_01] = temp_remoteCinfo_degree[ii];
temp_ModlocalCinfo_oComm[temp_counter_01] = temp_Comm;
temp_counter_01++;
}
}
}
GraphElem size_ModlocalCinfo = size_localCinfo+temp_counter_01;
GraphElem size_ModlocalCinfo_oComm = temp_counter_01;
std::vector<GraphElem>().swap(temp_remoteCinfo_key);
CommunityVector().swap(temp_remoteCinfo_size);
GraphWeightVector().swap(temp_remoteCinfo_degree);
// remoteComm is broken into 2 arrays
std::vector<GraphElem> temp_remoteComm_v;
temp_remoteComm_v = extract_vertex_VertexCommMap(remoteComm);
std::vector<GraphElem> temp_remoteComm_comm;
temp_remoteComm_comm = extract_comm_VertexCommMap(remoteComm);
/// Create map for remoteComm tail mapped to currComm
ClusterLocalMap remoteComm_to_currComm_map_v;
ClusterLocalMap::const_iterator storedAlready_v;
ClusterLocalMap::const_iterator storedAlready_comm;
GraphElem temp_counter_02 = 0;
GraphElem temp_tail;
GraphElem temp_comm, temp_comm_mapped;
// First modify currComm
#pragma omp parallel default(none), \
shared(ModcurrComm, localCinfo_to_remoteCinfo_map), \
private(temp_comm, storedAlready_comm)
#pragma omp for schedule(guided)
for(int ii = 0; ii < ModcurrComm.size(); ii++) {
temp_comm = ModcurrComm[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
ModcurrComm[ii] = storedAlready_comm->second;
}
}
// Next modify currComm to include remoteComm
for(int ii=0; ii<temp_remoteComm_comm.size(); ii++) {
temp_comm = temp_remoteComm_comm[ii];
temp_tail = temp_remoteComm_v[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
temp_comm_mapped = storedAlready_comm->second;
temp_remoteComm_comm[ii] = temp_comm_mapped;
ModcurrComm.push_back(temp_comm_mapped);
} else {
ModcurrComm.push_back(temp_comm);
/// check line below
}
if(temp_tail < base || temp_tail >= bound) {
storedAlready_v = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready_v == remoteComm_to_currComm_map_v.end()) {
if(temp_tail < base || temp_tail >= bound) {
remoteComm_to_currComm_map_v.insert(std::make_pair(
temp_tail, (bound + temp_counter_02) ));
temp_remoteComm_v[ii] = bound + temp_counter_02;
}
temp_counter_02++;
}
}
}
// }
std::vector<GraphElem>().swap(temp_remoteComm_v);
std::vector<GraphElem>().swap(temp_remoteComm_comm);
// comm_node_info remote_comm_info;
const Graph &g = dg.getLocalGraph();
GraphElem size_edgeListIndexes = g.edgeListIndexes.size();
GraphElem* temp_graph_edgeList_tail = gpu_graph.getPinned_edgeList_tail();
GraphWeight* temp_graph_edgeList_weight = gpu_graph.getPinned_edgeList_weight();
#pragma omp parallel default(none), shared(g, temp_graph_edgeList_tail, \
temp_graph_edgeList_weight, remoteComm_to_currComm_map_v), \
private(storedAlready)
#pragma omp for schedule(guided)
for(int ii=0; ii<g.edgeList.size(); ii++) {
ClusterLocalMap edgeList_tail_map;
GraphElem temp_tail = g.edgeList[ii].tail;
temp_graph_edgeList_tail[ii] = temp_tail;
temp_graph_edgeList_weight[ii] = g.edgeList[ii].weight;
if(temp_tail < base || temp_tail >= bound) {
/// use remoteComm_to_currComm_map_v map instead
storedAlready = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready != remoteComm_to_currComm_map_v.end()) {
temp_graph_edgeList_tail[ii] = storedAlready->second;
}
}
}
#ifdef PRINT_TIMEDS
double t_remap = timer();
double time_remap = t_remap - t0;
std::cout << "me[" << me << "]; Time gpu_remap: " << time_remap << std::endl;
#endif
/// Get pointers to memory of device arrays
GraphElem* dev_currComm = gpu_graph.get_currComm();
GraphElem* dev_ModlocalTarget = gpu_graph.get_ModlocalTarget();
GraphWeight* dev_vDegree = gpu_graph.get_vDegree();
GraphWeight* dev_clusterWeight = gpu_graph.get_clusterWeight();
GraphElem* dev_edgeListIndexes = gpu_graph.get_edgeListIndexes();
GraphElem* dev_ModcurrComm = gpu_graph.get_ModcurrComm();
GraphElem* dev_localCinfo_size = gpu_graph.get_ModlocalCinfo_size();
GraphWeight* dev_localCinfo_degree = gpu_graph.get_ModlocalCinfo_degree();
GraphElem* dev_localCinfo_oComm = gpu_graph.get_ModlocalCinfo_oComm();
GraphElem* dev_graph_edgeList_tail = gpu_graph.get_edgeList_tail();
GraphWeight* dev_graph_edgeList_weight = gpu_graph.get_edgeList_weight();
GraphElem* dev_unique_comm_array = gpu_graph.get_unique_comm_array();
GraphWeight* dev_unique_weight_array = gpu_graph.get_unique_weight_array();
gpu_graph.cpyVecTodev(currComm, dev_currComm);
gpu_graph.cpyVecTodev(vDegree, dev_vDegree);
gpu_graph.cpyVecTodev(clusterWeight, dev_clusterWeight);
gpu_graph.cpyVecTodev(g.edgeListIndexes, dev_edgeListIndexes);
bool check_ModlocalCinfo_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo);
assert(check_ModlocalCinfo_memory);
bool check_ModlocalCinfoComm_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo_oComm);
assert(check_ModlocalCinfoComm_memory);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_size, dev_localCinfo_size, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_degree, dev_localCinfo_degree, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_oComm, dev_localCinfo_oComm, size_ModlocalCinfo_oComm);
gpu_graph.cpyArrayTodev(temp_graph_edgeList_tail, dev_graph_edgeList_tail,
(GraphElem)g.edgeList.size());
gpu_graph.cpyArrayTodev(temp_graph_edgeList_weight, dev_graph_edgeList_weight,
(GraphElem)g.edgeList.size());
bool check_ModcurrComm_memory = gpu_graph.checkModCommMemory(
(GraphElem)ModcurrComm.size());
assert(check_ModcurrComm_memory);
gpu_graph.cpyVecTodev(ModcurrComm, dev_ModcurrComm);
GraphElem* dev_GraphEdge_low = gpu_graph.get_GraphEdge_low();
GraphElem* dev_GraphEdge_high = gpu_graph.get_GraphEdge_high();
/// allocate device memory for filling in comm and weights
GraphElem* dev_clmap_comm = gpu_graph.get_clmap_comm();
GraphWeight* dev_clmap_weight = gpu_graph.get_clmap_weight();
GraphElem clmapSize;
GraphElem* dev_clmap_loc = gpu_graph.get_clmap_loc();
GraphElem* dev_List_numEdges = gpu_graph.get_List_numEdges();
GraphElem* dev_list_lt_ts = gpu_graph.get_dev_list_lt_ts();
GraphElem* dev_list_lt_cs1 = gpu_graph.get_dev_list_lt_cs1();
GraphElem* dev_list_lt_cs2 = gpu_graph.get_dev_list_lt_cs2();
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_dtrans = timer();
double time_dtrans = t_dtrans - t0;
std::cout << "me[" << me << "]; Time gpu_dtrans: " << time_dtrans << std::endl;
#endif
if(numIters == 1)
{
CUDA_SAFE(hipMemset(dev_GraphEdge_low, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_GraphEdge_high, 0, mem_size_GraphElem_nv));
dim3 numBlocks01( (nv-1) / L_THREADBLOCK_SIZE + 1);
dim3 Block_dim01(L_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( gpu_distExecuteLouvainIteration), dim3(numBlocks01),dim3(Block_dim01), 0, 0,
nv,
dev_edgeListIndexes,
dev_GraphEdge_low, dev_GraphEdge_high,
me, base, bound);
CUDA_SAFE(hipMemset(dev_List_numEdges, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_clmap_loc, 0, mem_size_GraphElem_nv));
GraphElem* dev_NumClusters = gpu_graph.get_NumClusters();
CUDA_SAFE(hipMemset(dev_NumClusters, 0, sizeof(GraphElem)));
GraphElem* dev_size_clmap = gpu_graph.get_size_clmap();
CUDA_SAFE(hipMemset(dev_size_clmap, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_ts = gpu_graph.get_dev_size_lt_ts();
CUDA_SAFE(hipMemset(dev_size_lt_ts, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs1 = gpu_graph.get_dev_size_lt_cs1();
CUDA_SAFE(hipMemset(dev_size_lt_cs1, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs2 = gpu_graph.get_dev_size_lt_cs2();
CUDA_SAFE(hipMemset(dev_size_lt_cs2, 0, sizeof(GraphElem)));
CUDA_SAFE(hipMemset(dev_list_lt_ts, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_list_lt_cs1, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_list_lt_cs2, 0, mem_size_GraphElem_nv));
dim3 numBlocks02( (nv-1) / MS_THREADBLOCK_SIZE + 1);
dim3 Block_dim02(MS_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( count_size_clmap<PHY_WRP_SZ>), dim3(numBlocks02),dim3(Block_dim02), 0, 0, nv, dev_NumClusters,
dev_clmap_loc, dev_size_clmap,
dev_size_lt_ts, dev_list_lt_ts,
dev_size_lt_cs1, dev_list_lt_cs1,
dev_size_lt_cs2, dev_list_lt_cs2,
dev_GraphEdge_low, dev_GraphEdge_high, dev_List_numEdges);
/// copy to host number of clusters and size of cluster map memory
#ifdef DEBUG_CUVITE
GraphElem NumClusters = 0;
CUDA_SAFE(hipMemcpy(&NumClusters, dev_NumClusters,
sizeof(GraphElem), hipMemcpyDeviceToHost));
std::cout << "me[" << me << "]; NumClusters[ " << NumClusters << "]" << std::endl;
#endif
CUDA_SAFE(hipMemcpy(&clmapSize, dev_size_clmap,
sizeof(GraphElem), hipMemcpyDeviceToHost));
gpu_graph.set_clmapSize(clmapSize);
dev_clmap_comm = gpu_graph.getDevMem_clmapComm(clmapSize);
dev_clmap_weight = gpu_graph.getDevMem_clmapWeight(clmapSize);
gpu_graph.set_size_lt_ts();
gpu_graph.set_size_lt_cs1();
gpu_graph.set_size_lt_cs2();
}
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_iter1 = timer();
double time_iter1 = t_iter1 - t0;
std::cout << "me[" << me << "]; Time gpu_iter1: " << time_iter1 << std::endl;
#endif
GraphElem size_lt_ts = gpu_graph.get_size_lt_ts();
GraphElem size_lt_cs1 = gpu_graph.get_size_lt_cs1();
GraphElem size_lt_cs2 = gpu_graph.get_size_lt_cs2();
clmapSize = gpu_graph.get_clmapSize();
CUDA_SAFE(hipMemset(dev_clmap_comm, 0,
clmapSize * sizeof(GraphElem)));
CUDA_SAFE(hipMemset(dev_clmap_weight, 0,
clmapSize * sizeof(GraphWeight)));
GraphWeight* dev_selfLoopVec = gpu_graph.get_selfLoopVec();
CUDA_SAFE(hipMemset(dev_selfLoopVec, 0, mem_size_GraphWeight_nv));
dim3 numBlocks03( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim03(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( distBuildLocalMapCounter<PHY_WRP_SZ>), dim3(numBlocks03),dim3(Block_dim03), 0, 0,
num_vertex_gpu, dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_List_numEdges, dev_selfLoopVec,
base); // , bound);
GraphElem* dev_uniq_clus_vec = gpu_graph.get_uniq_clus_vec();
CUDA_SAFE(hipMemset(dev_uniq_clus_vec, 0, mem_size_GraphElem_nv));
GraphWeight* dev_counter = gpu_graph.get_counter();
CUDA_SAFE(hipMemset(dev_counter, 0, mem_size_GraphWeight_nv));
const int num_streams = 2;
hipStream_t streams[num_streams];
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
CUDA_SAFE(hipStreamCreate(&streams[i_streams]) );
}
dim3 numBlocks05( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim05(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( distGetMaxIndex<PHY_WRP_SZ>), dim3(numBlocks05),dim3(Block_dim05), 0, streams[0],
num_vertex_gpu,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
#if 0
hipLaunchKernelGGL(( distGetMaxIndex_large<PHY_WRP_SZ>), dim3(numBlocks05),dim3(Block_dim05), 0, streams[0],
num_vertex_gpu,
dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base, bound);
#else
if(size_lt_cs2 > 0) {
dim3 numBlocks052(FINDING_UNIQCOMM_NUM_BLOCKS);
dim3 Block_dim052(FINDING_UNIQCOMM_BLOCK_SIZE);
GraphElem nv_chunk_size;
nv_chunk_size = (size_lt_cs2 - 1) / FINDING_UNIQCOMM_NUM_BLOCKS + 1;
assert(ModcurrComm.size() <= FINDING_UNIQCOMM_ARRAY_SIZE);
hipLaunchKernelGGL(( distGetMaxIndex_large_new<PHY_WRP_SZ>), dim3(numBlocks052),dim3(Block_dim052), 0, streams[0],
me, numIters,
num_vertex_gpu, nv_chunk_size,
size_lt_cs2, dev_list_lt_cs2,
ModcurrComm.size(), // size_ModlocalCinfo,
dev_unique_comm_array,
dev_unique_weight_array,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
}
#endif
CUDA_SAFE(hipMemcpy(dev_ModlocalTarget, dev_ModcurrComm,
sizeof(GraphElem)*ModcurrComm.size(), hipMemcpyDeviceToDevice));
dim3 numBlocks06( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim06(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( computeMaxIndex<PHY_WRP_SZ>), dim3(numBlocks06),dim3(Block_dim06), 0, streams[0],
// nv,
num_vertex_gpu,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
hipLaunchKernelGGL(( computeMaxIndex_large<PHY_WRP_SZ>), dim3(numBlocks06),dim3(Block_dim06), 0, streams[0],
// nv,
num_vertex_gpu,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kernels = timer();
double time_kernels = t_kernels - t0;
std::cout << "me[" << me << "]; Time gpu_kernels: " << time_iter1 << std::endl;
#endif
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
hipStreamSynchronize(streams[i_streams]);
}
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
hipStreamDestroy(streams[i_streams]);
}
/// Copy Targets to Host
CUDA_SAFE(hipMemcpy(&temp_targetComm_gpu[0],
dev_ModlocalTarget,
(num_vertex_gpu*sizeof(GraphElem)), hipMemcpyDeviceToHost));
/// Copy clusterWeight to Host
CUDA_SAFE(hipMemcpy(&clusterWeight[0],
dev_clusterWeight,
(num_vertex_gpu*sizeof(GraphWeight)), hipMemcpyDeviceToHost));
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kd2h = timer();
double time_kd2h = t_kd2h - t0;
std::cout << "me[" << me << "]; Time gpu_kd2h: " << time_kd2h << std::endl;
#endif
std::vector<GraphElem>().swap(ModcurrComm);
double t1 = timer();
time_gpu = t1 - t0;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; Time GPU: " << time_gpu << std::endl;
#endif
} // if (num_vertex_gpu > 0) condition
} // close gpu secton
} // close parallel
memcpy(&temp_targetComm_gpu[0+num_vertex_gpu],
&temp_targetComm_cpu[0+num_vertex_gpu],
num_vertex_cpu*sizeof(GraphElem));
omp_set_num_threads(14);
updateLocalTarget_gpu (
nv,
currComm,
targetComm,
vDegree,
remoteCupdate,
temp_targetComm_gpu,
localCupdate,
base, bound, numIters);
std::vector<GraphElem>().swap(temp_targetComm_cpu);
std::vector<GraphElem>().swap(temp_targetComm_gpu);
#else // else option runs GPU code below
omp_set_num_threads(14);
double t0 = timer();
// GPU only code
/// size equal to nv X GraphElem
size_t mem_size_GraphElem_nv = sizeof(GraphElem) * nv;
/// size equal to nv X GraphWeight
size_t mem_size_GraphWeight_nv = sizeof(GraphWeight) * nv;
GraphElem size_localCinfo = localCinfo.size();
/// split localCinfo into vectors for different struct elements
GraphElem* temp_ModlocalCinfo_size = gpu_graph.getPinned_ModlocalCinfo_size();
GraphWeight* temp_ModlocalCinfo_degree = gpu_graph.getPinned_ModlocalCinfo_degree();
#pragma omp parallel default(none), \
shared(localCinfo, temp_ModlocalCinfo_size, temp_ModlocalCinfo_degree)
#pragma omp for schedule(guided)
for(int ii=0; ii<localCinfo.size(); ii++) {
temp_ModlocalCinfo_size[ii] = localCinfo[ii].size;
temp_ModlocalCinfo_degree[ii] = localCinfo[ii].degree;
}
/// Remote Community Info
/// First get the keys of remoteCinfo map
std::vector<GraphElem> temp_remoteCinfo_key = extract_keys_CommMap(remoteCinfo);
GraphElem size_remoteCinfo = remoteCinfo.size();
/// split RemoteCinfo into vectors for different struct elements
std::vector<GraphElem>temp_remoteCinfo_size =
extract_value_CommMap_size(remoteCinfo);
std::vector<GraphWeight>temp_remoteCinfo_degree =
extract_value_CommMap_degree(remoteCinfo);
/// now modify currComm to include remoteComm
GraphElem* temp_ModlocalCinfo_oComm = gpu_graph.getPinned_ModlocalCinfo_oComm();
ClusterLocalMap localCinfo_to_remoteCinfo_map;
ClusterLocalMap::const_iterator storedAlready;
GraphElem temp_counter_01 = 0;
std::vector<GraphElem> ModcurrComm = currComm;
for(int ii=0; ii<temp_remoteCinfo_key.size(); ii++) {
GraphElem temp_Comm = temp_remoteCinfo_key[ii];
if(temp_Comm < base || temp_Comm >= bound)
{
storedAlready = localCinfo_to_remoteCinfo_map.find(temp_Comm);
if(storedAlready == localCinfo_to_remoteCinfo_map.end()) {
localCinfo_to_remoteCinfo_map.insert(std::make_pair(
temp_Comm, (temp_counter_01+bound)));
temp_ModlocalCinfo_size[size_localCinfo+temp_counter_01] = temp_remoteCinfo_size[ii];
temp_ModlocalCinfo_degree[size_localCinfo+temp_counter_01] = temp_remoteCinfo_degree[ii];
temp_ModlocalCinfo_oComm[temp_counter_01] = temp_Comm;
temp_counter_01++;
}
}
}
GraphElem size_ModlocalCinfo = size_localCinfo+temp_counter_01;
GraphElem size_ModlocalCinfo_oComm = temp_counter_01;
std::vector<GraphElem>().swap(temp_remoteCinfo_key);
CommunityVector().swap(temp_remoteCinfo_size);
GraphWeightVector().swap(temp_remoteCinfo_degree);
// remoteComm is broken into 2 arrays
std::vector<GraphElem> temp_remoteComm_v;
temp_remoteComm_v = extract_vertex_VertexCommMap(remoteComm);
std::vector<GraphElem> temp_remoteComm_comm;
temp_remoteComm_comm = extract_comm_VertexCommMap(remoteComm);
/// Create map for remoteComm tail mapped to currComm
ClusterLocalMap remoteComm_to_currComm_map_v;
ClusterLocalMap::const_iterator storedAlready_v;
ClusterLocalMap::const_iterator storedAlready_comm;
GraphElem temp_counter_02 = 0;
GraphElem temp_tail;
GraphElem temp_comm, temp_comm_mapped;
// First modify currComm
#pragma omp parallel default(none), \
shared(ModcurrComm, localCinfo_to_remoteCinfo_map), \
private(temp_comm, storedAlready_comm)
#pragma omp for schedule(guided)
for(int ii = 0; ii < ModcurrComm.size(); ii++) {
temp_comm = ModcurrComm[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
ModcurrComm[ii] = storedAlready_comm->second;
}
}
// Next modify currComm to include remoteComm
for(int ii=0; ii<temp_remoteComm_comm.size(); ii++) {
temp_comm = temp_remoteComm_comm[ii];
temp_tail = temp_remoteComm_v[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
temp_comm_mapped = storedAlready_comm->second;
temp_remoteComm_comm[ii] = temp_comm_mapped;
ModcurrComm.push_back(temp_comm_mapped);
} else {
ModcurrComm.push_back(temp_comm);
/// check line below
}
if(temp_tail < base || temp_tail >= bound) {
storedAlready_v = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready_v == remoteComm_to_currComm_map_v.end()) {
if(temp_tail < base || temp_tail >= bound) {
remoteComm_to_currComm_map_v.insert(std::make_pair(
temp_tail, (bound + temp_counter_02) ));
temp_remoteComm_v[ii] = bound + temp_counter_02;
}
temp_counter_02++;
}
}
}
std::vector<GraphElem>().swap(temp_remoteComm_v);
std::vector<GraphElem>().swap(temp_remoteComm_comm);
// comm_node_info remote_comm_info;
const Graph &g = dg.getLocalGraph();
GraphElem size_edgeListIndexes = g.edgeListIndexes.size();
GraphElem* temp_graph_edgeList_tail = gpu_graph.getPinned_edgeList_tail();
GraphWeight* temp_graph_edgeList_weight = gpu_graph.getPinned_edgeList_weight();
#pragma omp parallel default(none), shared(g, temp_graph_edgeList_tail, \
temp_graph_edgeList_weight, remoteComm_to_currComm_map_v), \
private(storedAlready)
#pragma omp for schedule(guided)
for(int ii=0; ii<g.edgeList.size(); ii++) {
ClusterLocalMap edgeList_tail_map;
GraphElem temp_tail = g.edgeList[ii].tail;
temp_graph_edgeList_tail[ii] = temp_tail;
temp_graph_edgeList_weight[ii] = g.edgeList[ii].weight;
if(temp_tail < base || temp_tail >= bound) {
/// use remoteComm_to_currComm_map_v map instead
storedAlready = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready != remoteComm_to_currComm_map_v.end()) {
temp_graph_edgeList_tail[ii] = storedAlready->second;
}
}
}
#ifdef PRINT_TIMEDS
double t_remap = timer();
double time_remap = t_remap - t0;
std::cout << "me[" << me << "]; Time GPU_remap: " << time_remap << std::endl;
#endif
/// Get pointers to memory of device arrays
GraphElem* dev_currComm = gpu_graph.get_currComm();
GraphElem* dev_ModlocalTarget = gpu_graph.get_ModlocalTarget();
GraphWeight* dev_vDegree = gpu_graph.get_vDegree();
GraphWeight* dev_clusterWeight = gpu_graph.get_clusterWeight();
GraphElem* dev_edgeListIndexes = gpu_graph.get_edgeListIndexes();
GraphElem* dev_ModcurrComm = gpu_graph.get_ModcurrComm();
GraphElem* dev_localCinfo_size = gpu_graph.get_ModlocalCinfo_size();
GraphWeight* dev_localCinfo_degree = gpu_graph.get_ModlocalCinfo_degree();
GraphElem* dev_localCinfo_oComm = gpu_graph.get_ModlocalCinfo_oComm();
GraphElem* dev_graph_edgeList_tail = gpu_graph.get_edgeList_tail();
GraphWeight* dev_graph_edgeList_weight = gpu_graph.get_edgeList_weight();
GraphElem* dev_unique_comm_array = gpu_graph.get_unique_comm_array();
GraphWeight* dev_unique_weight_array = gpu_graph.get_unique_weight_array();
gpu_graph.cpyVecTodev(currComm, dev_currComm);
gpu_graph.cpyVecTodev(vDegree, dev_vDegree);
gpu_graph.cpyVecTodev(clusterWeight, dev_clusterWeight);
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_edgeListIndexes["
<< g.edgeListIndexes.size() << "]" << std::endl;
#endif
gpu_graph.cpyVecTodev(g.edgeListIndexes, dev_edgeListIndexes);
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_ModlocalCinfo["
<< size_ModlocalCinfo << "]; size_ModlocalCinfo_oComm["
<< size_ModlocalCinfo_oComm << "]" << std::endl;
#endif
bool check_ModlocalCinfo_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo);
assert(check_ModlocalCinfo_memory);
bool check_ModlocalCinfoComm_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo_oComm);
assert(check_ModlocalCinfoComm_memory);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_size, dev_localCinfo_size, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_degree, dev_localCinfo_degree, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_oComm, dev_localCinfo_oComm, size_ModlocalCinfo_oComm);
gpu_graph.cpyArrayTodev(temp_graph_edgeList_tail, dev_graph_edgeList_tail,
(GraphElem)g.edgeList.size());
gpu_graph.cpyArrayTodev(temp_graph_edgeList_weight, dev_graph_edgeList_weight,
(GraphElem)g.edgeList.size());
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_ModcurrComm["
<< ModcurrComm.size() << "]" << std::endl;
#endif
bool check_ModcurrComm_memory = gpu_graph.checkModCommMemory(
(GraphElem)ModcurrComm.size());
assert(check_ModcurrComm_memory);
gpu_graph.cpyVecTodev(ModcurrComm, dev_ModcurrComm);
GraphElem* dev_GraphEdge_low = gpu_graph.get_GraphEdge_low();
GraphElem* dev_GraphEdge_high = gpu_graph.get_GraphEdge_high();
/// allocate device memory for filling in comm and weights
GraphElem* dev_clmap_comm = gpu_graph.get_clmap_comm();
GraphWeight* dev_clmap_weight = gpu_graph.get_clmap_weight();
GraphElem clmapSize;
GraphElem* dev_clmap_loc = gpu_graph.get_clmap_loc();
GraphElem* dev_List_numEdges = gpu_graph.get_List_numEdges();
GraphElem* dev_list_lt_ts = gpu_graph.get_dev_list_lt_ts();
GraphElem* dev_list_lt_cs1 = gpu_graph.get_dev_list_lt_cs1();
GraphElem* dev_list_lt_cs2 = gpu_graph.get_dev_list_lt_cs2();
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_dtrans = timer();
double time_dtrans = t_dtrans - t0;
std::cout << "me[" << me << "]; Time GPU_dtrans: " << time_dtrans << std::endl;
#endif
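/// One-time setup on the first iteration: record per-vertex edge ranges,
/// size the per-vertex cluster-map storage, and build the degree-based
/// vertex lists (lt_ts / lt_cs1 / lt_cs2) used to pick kernel variants.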
if(numIters == 1)
{
CUDA_SAFE(hipMemset(dev_GraphEdge_low, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_GraphEdge_high, 0, mem_size_GraphElem_nv));
dim3 numBlocks01( (nv-1) / L_THREADBLOCK_SIZE + 1);
dim3 Block_dim01(L_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( gpu_distExecuteLouvainIteration), dim3(numBlocks01),dim3(Block_dim01), 0, 0,
nv,
dev_edgeListIndexes,
dev_GraphEdge_low, dev_GraphEdge_high,
me, base, bound);
CUDA_SAFE(hipMemset(dev_List_numEdges, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_clmap_loc, 0, mem_size_GraphElem_nv));
GraphElem* dev_NumClusters = gpu_graph.get_NumClusters();
CUDA_SAFE(hipMemset(dev_NumClusters, 0, sizeof(GraphElem)));
GraphElem* dev_size_clmap = gpu_graph.get_size_clmap();
CUDA_SAFE(hipMemset(dev_size_clmap, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_ts = gpu_graph.get_dev_size_lt_ts();
CUDA_SAFE(hipMemset(dev_size_lt_ts, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs1 = gpu_graph.get_dev_size_lt_cs1();
CUDA_SAFE(hipMemset(dev_size_lt_cs1, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs2 = gpu_graph.get_dev_size_lt_cs2();
CUDA_SAFE(hipMemset(dev_size_lt_cs2, 0, sizeof(GraphElem)));
CUDA_SAFE(hipMemset(dev_list_lt_ts, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_list_lt_cs1, 0, mem_size_GraphElem_nv));
CUDA_SAFE(hipMemset(dev_list_lt_cs2, 0, mem_size_GraphElem_nv));
dim3 numBlocks02( (nv-1) / MS_THREADBLOCK_SIZE + 1);
dim3 Block_dim02(MS_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( count_size_clmap<PHY_WRP_SZ>), dim3(numBlocks02),dim3(Block_dim02), 0, 0, nv, dev_NumClusters,
dev_clmap_loc, dev_size_clmap,
dev_size_lt_ts, dev_list_lt_ts,
dev_size_lt_cs1, dev_list_lt_cs1,
dev_size_lt_cs2, dev_list_lt_cs2,
dev_GraphEdge_low, dev_GraphEdge_high, dev_List_numEdges);
/// copy to host number of clusters and size of cluster map memory
#ifdef DEBUG_CUVITE
GraphElem NumClusters = 0;
CUDA_SAFE(hipMemcpy(&NumClusters, dev_NumClusters,
sizeof(GraphElem), hipMemcpyDeviceToHost));
#endif
CUDA_SAFE(hipMemcpy(&clmapSize, dev_size_clmap,
sizeof(GraphElem), hipMemcpyDeviceToHost));
gpu_graph.set_clmapSize(clmapSize);
dev_clmap_comm = gpu_graph.getDevMem_clmapComm(clmapSize);
dev_clmap_weight = gpu_graph.getDevMem_clmapWeight(clmapSize);
gpu_graph.set_size_lt_ts();
gpu_graph.set_size_lt_cs1();
gpu_graph.set_size_lt_cs2();
}
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_iter1 = timer();
double time_iter1 = t_iter1 - t0;
std::cout << "me[" << me << "]; Time GPU_iter1: " << time_iter1 << std::endl;
#endif
GraphElem size_lt_ts = gpu_graph.get_size_lt_ts();
GraphElem size_lt_cs1 = gpu_graph.get_size_lt_cs1();
GraphElem size_lt_cs2 = gpu_graph.get_size_lt_cs2();
clmapSize = gpu_graph.get_clmapSize();
CUDA_SAFE(hipMemset(dev_clmap_comm, 0,
clmapSize * sizeof(GraphElem)));
CUDA_SAFE(hipMemset(dev_clmap_weight, 0,
clmapSize * sizeof(GraphWeight)));
GraphWeight* dev_selfLoopVec = gpu_graph.get_selfLoopVec();
CUDA_SAFE(hipMemset(dev_selfLoopVec, 0, mem_size_GraphWeight_nv));
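/// Kernel 1: accumulate the self-loop weight of every local vertex.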
dim3 numBlocks03( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim03(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( distBuildLocalMapCounter<PHY_WRP_SZ>), dim3(numBlocks03),dim3(Block_dim03), 0, 0,
nv, dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_List_numEdges, dev_selfLoopVec,
base); // , bound);
GraphElem* dev_uniq_clus_vec = gpu_graph.get_uniq_clus_vec();
CUDA_SAFE(hipMemset(dev_uniq_clus_vec, 0, mem_size_GraphElem_nv));
GraphWeight* dev_counter = gpu_graph.get_counter();
CUDA_SAFE(hipMemset(dev_counter, 0, mem_size_GraphWeight_nv));
const int num_streams = 2;
hipStream_t streams[num_streams];
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
CUDA_SAFE(hipStreamCreate(&streams[i_streams]) );
}
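/// Kernel 2: build per-vertex cluster maps and count unique neighbour
/// communities (tile-per-vertex variant for low/medium-degree vertices);
/// very high-degree vertices are handled by the _large_new variant below.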
dim3 numBlocks05( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim05(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( distGetMaxIndex<PHY_WRP_SZ>), dim3(numBlocks05),dim3(Block_dim05), 0, 0,
nv,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kernel11 = timer();
double time_kernel11 = t_kernel11 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel11: " << time_kernel11 << std::endl;
#endif
#if 0
hipLaunchKernelGGL(( distGetMaxIndex_large<PHY_WRP_SZ>), dim3(numBlocks05),dim3(Block_dim05), 0, streams[1],
nv,
dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base, bound);
#else
if(size_lt_cs2 > 0) {
dim3 numBlocks052(FINDING_UNIQCOMM_NUM_BLOCKS);
dim3 Block_dim052(FINDING_UNIQCOMM_BLOCK_SIZE);
GraphElem nv_chunk_size;
nv_chunk_size = (size_lt_cs2 - 1) / FINDING_UNIQCOMM_NUM_BLOCKS + 1;
assert(ModcurrComm.size() <= FINDING_UNIQCOMM_ARRAY_SIZE);
hipLaunchKernelGGL(( distGetMaxIndex_large_new<PHY_WRP_SZ>), dim3(numBlocks052),dim3(Block_dim052), 0, streams[0],
me, numIters,
nv, nv_chunk_size,
size_lt_cs2, dev_list_lt_cs2,
ModcurrComm.size(), // size_ModlocalCinfo,
dev_unique_comm_array,
dev_unique_weight_array,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
}
#endif
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kernel1 = timer();
double time_kernel1 = t_kernel1 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel1: " << time_kernel1 << std::endl;
#endif
CUDA_SAFE(hipMemcpy(dev_ModlocalTarget, dev_ModcurrComm,
sizeof(GraphElem)*ModcurrComm.size(), hipMemcpyDeviceToDevice));
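/// Kernel 3: pick the best target community per vertex by modularity gain
/// (regular variant, followed by the large-vertex variant on a stream).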
dim3 numBlocks06( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim06(S_THREADBLOCK_SIZE);
hipLaunchKernelGGL(( computeMaxIndex<PHY_WRP_SZ>), dim3(numBlocks06),dim3(Block_dim06), 0, 0,
nv,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kernel21 = timer();
double time_kernel21 = t_kernel21 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel21: " << time_kernel21 << std::endl;
#endif
hipLaunchKernelGGL(( computeMaxIndex_large<PHY_WRP_SZ>), dim3(numBlocks06),dim3(Block_dim06), 0, streams[0],
nv,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kernels = timer();
double time_kernels = t_kernels - t0;
std::cout << "me[" << me << "]; Time GPU_kernels: " << time_kernels << std::endl;
#endif
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
hipStreamSynchronize(streams[i_streams]);
}
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
hipStreamDestroy(streams[i_streams]);
}
/// Copy Targets to Host
CUDA_SAFE(hipMemcpy(&ModcurrComm[0], dev_ModlocalTarget,
(ModcurrComm.size()*sizeof(GraphElem)), hipMemcpyDeviceToHost));
/// Copy clusterWeight to Host
CUDA_SAFE(hipMemcpy(&clusterWeight[0],
dev_clusterWeight,
(clusterWeight.size()*sizeof(GraphWeight)), hipMemcpyDeviceToHost));
#ifdef PRINT_TIMEDS
hipDeviceSynchronize();
double t_kd2h = timer();
double time_kd2h = t_kd2h - t0;
std::cout << "me[" << me << "]; Time GPU_kd2h: " << time_kd2h << std::endl;
#endif
updateLocalTarget_gpu (
nv,
currComm,
targetComm,
vDegree,
remoteCupdate,
ModcurrComm,
localCupdate,
base, bound, numIters);
#ifdef PRINT_TIMEDS
double t_locupd = timer();
double time_locupd = t_locupd - t0;
std::cout << "me[" << me << "]; Time GPU_locupd: " << time_locupd << std::endl;
#endif
std::vector<GraphElem>().swap(ModcurrComm);
#ifdef PRINT_TIMEDS
double t_all = timer();
double time_all = t_all - t0;
std::cout << "me[" << me << "]; Time GPU_all: " << time_all << std::endl;
#endif
#endif // end of option to run hybrid or GPU-only code
return 1;
}
| 39ce22b852a851284a7840fe3b812a1537166728.cu | #include "louvain_cuda.cuh"
#include "louvain_cuda_cpp_interface.hpp"
#include "GpuGraph.cuh"
#include <cstring>
#include <sstream>
#include <sys/time.h>
#include <time.h>
#include <cooperative_groups.h>
// #define PRINT_HYBRID
// #define PRINT_TIMEDS
// #define USE_HYBRID_CPU_GPU
#define L_THREADBLOCK_SIZE 512
#define M_THREADBLOCK_SIZE 256
// #define S_THREADBLOCK_SIZE 128
#define S_THREADBLOCK_SIZE 640
// #define MS_THREADBLOCK_SIZE 32
#define MS_THREADBLOCK_SIZE 512
#define ARRAY_REDUCE_THREADBLOCK_SIZE 32
// #define S_BLOCK_TILE ( ARRAY_REDUCE_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define S_BLOCK_TILE ( S_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define MS_BLOCK_TILE ( MS_THREADBLOCK_SIZE / PHY_WRP_SZ )
#define FINDING_UNIQCOMM_BLOCK_TILE ( FINDING_UNIQCOMM_BLOCK_SIZE / PHY_WRP_SZ )
#define CUT_SIZE_NUM_EDGES2 9400000000
// #define CUT_SIZE_NUM_EDGES1 4096
// #define CUT_SIZE_NUM_EDGES12 4096
#define CUT_SIZE_NUM_EDGES1 4096
#define CUT_SIZE_NUM_EDGES12 4096
// #define CUT_SIZE_NUM_EDGES1 3310720
namespace cg = cooperative_groups;
using namespace CuVite;
double timer ( void )
{
struct timeval tv;
struct timezone tz;
gettimeofday ( &tv, &tz );
return (tv.tv_sec + 0.000001 * tv.tv_usec);
}
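/// Apply the GPU-computed target communities on the host: update the local
/// community info with OpenMP atomics and accumulate updates for communities
/// owned by other ranks in remoteCupdate.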
void updateLocalTarget_gpu (
GraphElem nv,
const CommunityVector &currComm,
CommunityVector &targetComm,
const GraphWeightVector &vDegree,
CommMap &remoteCupdate,
CommunityVector &temp_targetComm,
CommVector &localCupdate,
const GraphElem base, const GraphElem bound, int numIters
) {
// omp_set_num_threads(7);
#pragma omp parallel default(none), shared(nv, localCupdate, currComm, \
targetComm, vDegree, remoteCupdate, \
temp_targetComm, numIters)
#pragma omp for schedule(guided)
for (int i = 0; i < nv; i++) {
GraphElem localTarget, ModlocalTarget;
bool currCommIsLocal;
bool targetCommIsLocal;
GraphElem cc = currComm[i];
if(cc >= base && cc < bound) currCommIsLocal = true;
ModlocalTarget = temp_targetComm[i];
localTarget = ModlocalTarget;
/// is the Target Local?
if (ModlocalTarget >= base && ModlocalTarget < bound) targetCommIsLocal = true;
/// Modify current if >= bound using stored map
if (cc < base || cc >= bound) {
currCommIsLocal = false;
}
/// Modify ModlocalTarget if >= bound using stored map
/// Stored map is no more required
if (ModlocalTarget < base || ModlocalTarget >= bound) {
targetCommIsLocal = false;
}
// std::cout << "GPU i[" << i << "]; cc[" << cc << "]; localTarget["
// << localTarget << "]" << std::endl;
// current and target comm are local - atomic updates to vectors
if((localTarget != cc) && (localTarget != -1) &&
currCommIsLocal && targetCommIsLocal) {
#ifdef DEBUG_PRINTF
assert( base < localTarget < bound);
assert( base < cc < bound);
assert( cc - base < localCupdate.size());
assert( (localTarget - base) < (GraphElem)localCupdate.size());
#endif
#pragma omp atomic update
localCupdate[localTarget-base].degree += vDegree[i];
#pragma omp atomic update
localCupdate[localTarget-base].size++;
#pragma omp atomic update
localCupdate[cc-base].degree -= vDegree[i];
#pragma omp atomic update
localCupdate[cc-base].size--;
}
/// current is local, target is not - do atomic on local,
/// accumulate in Maps for remote
if ((localTarget != cc) && (localTarget != -1) &&
currCommIsLocal && !targetCommIsLocal) {
#pragma omp atomic update
localCupdate[cc-base].degree -= vDegree[i];
#pragma omp atomic update
localCupdate[cc-base].size--;
/// Search target in remoteCupdate
CommMap::iterator iter=remoteCupdate.find(localTarget);
#pragma omp atomic update
iter->second.degree += vDegree[i];
#pragma omp atomic update
iter->second.size++;
}
/// current is remote, target is local
/// accumulate for current, atomic on local
if ((localTarget != cc) && (localTarget != -1) &&
!currCommIsLocal && targetCommIsLocal) {
#pragma omp atomic update
localCupdate[localTarget-base].degree += vDegree[i];
#pragma omp atomic update
localCupdate[localTarget-base].size++;
/// Search in remoteCupdate
CommMap::iterator iter=remoteCupdate.find(cc);
#pragma omp atomic update
iter->second.degree -= vDegree[i];
#pragma omp atomic update
iter->second.size--;
}
/// Current and target are remote - accumulate for both
if ((localTarget != cc) && (localTarget != -1) &&
!currCommIsLocal && !targetCommIsLocal) {
// search current
CommMap::iterator iter=remoteCupdate.find(cc);
#pragma omp atomic update
iter->second.degree -= vDegree[i];
#pragma omp atomic update
iter->second.size--;
// search target
iter=remoteCupdate.find(localTarget);
#pragma omp atomic update
iter->second.degree += vDegree[i];
#pragma omp atomic update
iter->second.size++;
}
#ifdef DEBUG_PRINTF
assert(localTarget != -1);
#endif
targetComm[i] = localTarget;
// std::cout << "GPU i[" << i << "]; cc[" << cc << "]; localTarget["
// << localTarget << "]" << std::endl;
}
}
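/// Reduction helpers: a shared-memory reduction over a generic thread group
/// and shuffle-based sums over a statically sized tile. Only thread/lane 0
/// of the group is guaranteed to hold the full result.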
__device__ GraphWeight weight_reduce(
cg::thread_group g, GraphWeight *x, GraphWeight val)
{
int lane = g.thread_rank();
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
x[lane] = val; g.sync();
val += x[lane+i]; g.sync();
}
return val; // note: only thread 0 will return full sum
}
template <int tile_sz>
__device__ GraphWeight weight_reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, GraphWeight val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
#ifndef USE_32_BIT_GRAPH
template <int tile_sz>
__device__ GraphElem reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, GraphElem val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
#endif
template <int tile_sz>
__device__ int reduce_sum_tile_shfl(
cg::thread_block_tile<tile_sz> g, int val)
{
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
for (int i = g.size() / 2; i > 0; i /= 2) {
val += g.shfl_down(val, i);
}
return val; // note: only thread 0 will return full sum
}
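/// Whole-thread-block variant of computeMaxIndex for very high-degree
/// vertices (more than CUT_SIZE_NUM_EDGES12 candidate communities): the
/// entire block cooperates on one vertex, reducing the best modularity gain
/// first within each tile and then across tiles in shared memory.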
template<int tile_sz>
__global__
void computeMaxIndex_large_thb(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
// auto tileIdx = thb_g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = thb_g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
cg::sync(thb_g);
for( int wii = 0; wii < thb_g.size(); wii++) {
// num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
num_cluster = shared_num_uniq_cl[wii];
// if(num_cluster >= (GraphElem)tile_sz)
if(num_cluster > CUT_SIZE_NUM_EDGES12) {
// if(tile.thread_rank() == 0) printf("num_cluster[%ld]; \n",
// num_cluster);
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
// GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
// my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
// for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
for(int k = 0; k < ((num_cluster-1)/thb_g.size()+1); k++)
{
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
thb_g.sync();
/// Perform reduction at threadblock level
for (int s =1; s < S_BLOCK_TILE; s *=2)
{
int indx = 2 * s * thb_g.thread_rank();
if(indx < S_BLOCK_TILE) {
// active = cg::coalesced_threads();
if(t_shared_maxIndex[indx+s] != -1) {
if((t_shared_maxGain[indx+s] > t_shared_maxGain[indx]) ||
(t_shared_maxIndex[indx] == -1) ||
(t_shared_maxGain[indx+s] == t_shared_maxGain[indx] &&
t_shared_maxGain[indx+s] != 0.0 && t_shared_maxIndex[indx] != -1 &&
t_shared_maxIndex[indx+s] < t_shared_maxIndex[indx])
) {
t_shared_maxGain[indx] = t_shared_maxGain[indx+s];
t_shared_maxIndex[indx] = t_shared_maxIndex[indx+s];
t_shared_maxSize[indx] = t_shared_maxSize[indx+s];
}
} else if(t_shared_maxIndex[indx+s] == -1 && t_shared_maxIndex[indx] == -1) {
t_shared_maxGain[indx] = 0.0;
t_shared_maxIndex[indx] = -1;
t_shared_maxSize[indx] = 0;
}
// active.sync();
}
}
thb_g.sync();
// if(tile.thread_rank() == 0)
if(thb_g.thread_rank() == 0)
{
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((t_shared_maxSize[0] == 1) &&
(currSize == 1) &&
(t_shared_maxIndex[0] > cc)) {
t_shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = t_shared_maxIndex[0];
}
thb_g.sync();
} // num_cluster loop
// tile.sync();
// __syncwarp();
thb_g.sync();
} // outer loop
}
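/// Tile-per-vertex variant of computeMaxIndex for high-degree vertices
/// (more than CUT_SIZE_NUM_EDGES12 candidate communities): one tile scans
/// the vertex's cluster map and keeps the best modularity gain.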
template<int tile_sz>
__global__
void computeMaxIndex_large(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
// if(i == 42609) printf("vertex[%ld]; num_clusters[%ld]\n", i, uniq_clus_vec[i]);
/// Create cooperative groups
auto g = cg::this_thread_block();
// auto tileIdx = g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
for( int wii = 0; wii < tile.size(); wii++) {
num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
if(num_cluster > CUT_SIZE_NUM_EDGES12) {
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
{
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
if(tile.thread_rank() == 0) {
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((shared_maxSize[0] == 1) &&
(currSize == 1) &&
(shared_maxIndex[0] > cc)) {
shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = shared_maxIndex[0];
}
tile.sync();
__syncwarp();
}
tile.sync();
__syncwarp();
}
}
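/// Select the best target community per vertex by modularity gain and write
/// localTarget and clusterWeight: a tile cooperates on vertices with
/// tile_sz..CUT_SIZE_NUM_EDGES12 candidate communities, while vertices with
/// fewer candidates are handled one per thread at the end of the kernel.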
template<int tile_sz>
__global__
void computeMaxIndex(
GraphElem nv,
GraphElem* ocurrComm, GraphElem* currComm,
GraphElem* localCinfo_size, GraphWeight* localCinfo_degree,
GraphElem* localCinfo_oComm,
GraphWeight* selfLoop,
GraphElem* uniq_clus_vec, GraphWeight* counter,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphWeight* vDegree_vec,
GraphElem* localTarget,
GraphWeight* clusterWeight,
const double constant,
const GraphElem base, const GraphElem bound
)
{
__shared__ int shared_num_uniq_cl[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_my_counter[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_curGain[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Index[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_Size[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_maxGain[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxIndex[S_BLOCK_TILE];
__shared__ GraphElem t_shared_maxSize[S_BLOCK_TILE];
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
shared_num_uniq_cl[ii] = 0;
if(i < nv) {
shared_num_uniq_cl[ii] = uniq_clus_vec[i];
}
/// Create cooperative groups
auto g = cg::this_thread_block();
// auto tileIdx = g.thread_rank()/tile_sz;
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile.size();
unsigned ti = tileIdx*tile.size();
GraphWeight *my_counter = &t_my_counter[tileIdx * tile.size()];
GraphWeight *shared_curGain = &t_shared_curGain[tileIdx * tile.size()];
GraphElem *shared_Index = &t_shared_Index[tileIdx * tile.size()];
GraphElem *shared_Size = &t_shared_Size[tileIdx * tile.size()];
GraphWeight *shared_maxGain = &t_shared_maxGain[tileIdx];
GraphElem *shared_maxIndex = &t_shared_maxIndex[tileIdx];
GraphElem *shared_maxSize = &t_shared_maxSize[tileIdx];
GraphElem num_cluster = 0;
GraphWeight ay = 0.0, eiy = 0.0;
GraphWeight eix;
GraphElem size;
GraphWeight curGain = 0.0;
GraphWeight vDegree, ax;
GraphElem cc;
// GraphWeight my_counter;
GraphWeight currDegree;
tile.sync();
for( int wii = 0; wii < tile.size(); wii++) {
num_cluster = shared_num_uniq_cl[tileIdx*tile.size()+wii];
if(num_cluster >= (GraphElem)tile_sz && num_cluster <= CUT_SIZE_NUM_EDGES12) {
if(tile.thread_rank() == 0) shared_maxGain[0] = 0.0;
tile.sync();
__syncwarp();
shared_Index[tile.thread_rank()] = 0;
GraphElem ver_loc = (GraphElem)(blockIdx.x*blockDim.x+tileIdx*tile.size()+wii);
cc = currComm[ver_loc];
if(tile.thread_rank() == 0)
{
if(cc >= bound) {
shared_maxIndex[0] = ocurrComm[ver_loc];
} else {
shared_maxIndex[0] = cc;
}
shared_maxSize[0] = localCinfo_size[cc - base];
}
tile.sync();
__syncwarp();
my_counter[tile.thread_rank()] = counter[blockIdx.x*blockDim.x+tileIdx*tile.size()+wii];
eix = my_counter[tile.thread_rank()] - selfLoop[ver_loc];
vDegree = vDegree_vec[ver_loc];
currDegree = localCinfo_degree[cc - base];
ax = currDegree - vDegree;
for(int k = 0; k < ((num_cluster-1)/tile.size()+1); k++)
{
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
if(thread_indx < num_cluster) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem tcomm = clmap_comm[
clmap_loc[ver_loc]+thread_indx];
ay = localCinfo_degree[tcomm - base];
eiy = clmap_weight[
clmap_loc[ver_loc]+thread_indx];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
if(tcomm >= bound) {
shared_Index[tile.thread_rank()] = localCinfo_oComm[tcomm - bound];
} else {
shared_Index[tile.thread_rank()] = tcomm;
}
if((curGain > shared_maxGain[0]) && tcomm != cc ||
(curGain == shared_maxGain[0] && curGain != 0.0 && tcomm != cc &&
shared_Index[tile.thread_rank()] < shared_maxIndex[0]) ) {
shared_curGain[tile.thread_rank()] = curGain;
shared_Size[tile.thread_rank()] = localCinfo_size[tcomm - base];
} else {
shared_Index[tile.thread_rank()] = -1;
shared_curGain[tile.thread_rank()] = 0.0;
shared_Size[tile.thread_rank()] = 0;
}
/// Perform reduction
active.sync();
#pragma unroll
for (int s =1; s < tile.size(); s *=2)
{
int indx = 2 * s * tile.thread_rank();
// int indx = tileIdx*tile.size() + 2 * s * tile.thread_rank();
if(indx < tile.size()) {
if(shared_Index[indx+s] != -1) {
if((shared_curGain[indx+s] > shared_curGain[indx]) ||
(shared_Index[indx] == -1) ||
(shared_curGain[indx+s] == shared_curGain[indx] &&
shared_curGain[indx+s] != 0.0 && shared_Index[indx] != -1 &&
shared_Index[indx+s] < shared_Index[indx])
) {
shared_curGain[indx] = shared_curGain[indx+s];
shared_Index[indx] = shared_Index[indx+s];
shared_Size[indx] = shared_Size[indx+s];
}
} else if(shared_Index[indx+s] == -1 && shared_Index[indx] == -1) {
shared_curGain[indx] = 0.0;
shared_Index[indx] = -1;
shared_Size[indx] = 0;
}
}
active.sync();
}
if(tile.thread_rank() == 0) {
if(shared_curGain[0] > shared_maxGain[0] ||
shared_curGain[0] == shared_maxGain[0] &&
shared_curGain[0] != 0.0 &&
shared_Index[0] < shared_maxIndex[0]
) {
shared_maxGain[0] = shared_curGain[0];
shared_maxIndex[0] = shared_Index[0];
shared_maxSize[0] = shared_Size[0];
}
}
active.sync();
}
tile.sync();
__syncwarp();
}
if(tile.thread_rank() == 0) {
GraphElem currSize = localCinfo_size[cc - base];
if(cc >= bound) cc = ocurrComm[ver_loc];
if((shared_maxSize[0] == 1) &&
(currSize == 1) &&
(shared_maxIndex[0] > cc)) {
shared_maxIndex[0] = cc;
}
clusterWeight[ver_loc] += counter[ver_loc];
localTarget[ver_loc] = shared_maxIndex[0];
}
tile.sync();
__syncwarp();
}
tile.sync();
__syncwarp();
}
/// Now handle vertices with num_clusters < tile_sz: one vertex per thread
// if(i >= nv) return;
curGain = 0.0;
GraphWeight maxGain = 0.0;
num_cluster = shared_num_uniq_cl[ii];
if(num_cluster < (GraphElem)tile_sz && num_cluster > 0 && i < nv) {
cc = currComm[i];
GraphElem maxIndex;
if(cc >= bound) {
maxIndex = ocurrComm[i];
} else {
maxIndex = cc;
}
localTarget[i] = -1; // cc;
GraphElem currSize = localCinfo_size[cc - base];
currDegree = localCinfo_degree[cc - base];
GraphElem maxSize = currSize;
t_my_counter[ii] = counter[i];
eix = t_my_counter[ii] - selfLoop[i];
vDegree = vDegree_vec[i];
ax = currDegree - vDegree;
GraphElem tcomm, otcomm;
for(GraphElem k = 0; k < num_cluster; k++) {
tcomm = clmap_comm[clmap_loc[i]+k];
if (tcomm != cc) {
ay = localCinfo_degree[tcomm - base];
size = localCinfo_size[tcomm - base];
eiy = clmap_weight[clmap_loc[i]+k];
curGain = 2.0 * (eiy - eix) - 2.0 * vDegree * (ay - ax) * constant;
if(tcomm >= bound) {
otcomm = localCinfo_oComm[tcomm - bound];
} else {
otcomm = tcomm;
}
if((curGain > maxGain) ||
(curGain == maxGain && curGain != 0.0 && otcomm < maxIndex) ) {
maxGain = curGain;
maxIndex = otcomm;
maxSize = size;
}
}
}
if(cc >= bound) cc = ocurrComm[i];
if((maxSize == 1) && (currSize == 1) && (maxIndex > cc)) {
maxIndex = cc;
}
clusterWeight[i] += counter[i];
localTarget[i] = maxIndex;
} else if(num_cluster == 0 && i < nv) {
localTarget[i] = ocurrComm[i];
}
}
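/// Build cluster maps for very high-degree vertices (edge count above
/// CUT_SIZE_NUM_EDGES1) using one thread block per vertex: a per-block
/// scratch array indexed by community id merges duplicate communities,
/// accumulates their edge weights, and is then compacted into
/// clmap_comm / clmap_weight.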
template<int tile_sz>
__global__
void distGetMaxIndex_large_new(
const int me, const int numIters,
const GraphElem nv, GraphElem nv_chunk_size,
const GraphElem size_lt_cs2, GraphElem* list_lt_cs2,
GraphElem max_comm_size,
GraphElem* unique_comm_array_g,
GraphWeight* unique_weight_array_g,
GraphElem* e0, // GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base // , const GraphElem bound
) {
// __shared__ GraphElem t_shared_begin_loc[FINDING_UNIQCOMM_BLOCK_TILE];
// __shared__ GraphElem t_shared_comm[FINDING_UNIQCOMM_BLOCK_SIZE];
// __shared__ GraphWeight t_shared_weight[FINDING_UNIQCOMM_BLOCK_SIZE];
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
auto tileIdx = thb_g.thread_rank()/tile_sz;
// GraphElem *shared_begin_loc = &t_shared_begin_loc[tileIdx];
// GraphElem *shared_comm = &t_shared_comm[tileIdx];
// GraphWeight* shared_weight = &t_shared_weight[tileIdx];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
int ii = threadIdx.x;
GraphElem* unique_comm_array =
&unique_comm_array_g[blockIdx.x*FINDING_UNIQCOMM_ARRAY_SIZE];
GraphWeight* unique_weight_array =
&unique_weight_array_g[blockIdx.x*FINDING_UNIQCOMM_ARRAY_SIZE];
for(GraphElem i_nv = 0; i_nv < nv_chunk_size; ++i_nv) {
GraphElem i_index = FINDING_UNIQCOMM_NUM_BLOCKS * i_nv + blockIdx.x;
if(i_index < size_lt_cs2) {
GraphElem i = list_lt_cs2[i_index];
if(i < nv) {
GraphElem cc = currComm[i];
GraphElem num_edges = List_numEdges[i];
if(num_edges > CUT_SIZE_NUM_EDGES1) {
GraphElem clmap_loc_i = clmap_loc[i];
// if(threadIdx.x == 0)
// printf("me[%d]; blockIdx.x[%d]; i[%ld]; base[%ld]\n", me, blockIdx.x, i, base);
//
for(GraphElem k = 0; k < ((max_comm_size*FINDING_UNIQCOMM_FACTOR-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < max_comm_size*FINDING_UNIQCOMM_FACTOR) {
unique_comm_array[thread_indx] = -1;
unique_weight_array[thread_indx] = 0.0;
}
}
thb_g.sync();
// if(thb_g.thread_rank() == 0)
// printf("me[%d]; i[%ld]; num_edges[%ld]\n", me, i, num_edges);
//
for(GraphElem k = 0; k < ((num_edges-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx = e0[i]+thread_indx;
GraphElem tail = graph_edgeList_tail[th_tail_indx];
// unique_comm_array[currComm[tail - base]] = 1;
// if(i == 169230)
// printf("me[%d]; thread_indx[%ld]; currComm[%ld]\n", me, thread_indx, currComm[tail - base]);
#if __cuda_arch__ >= 600
atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#else
#ifndef USE_32_BIT_GRAPH
my_func_atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
my_func_atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#else
atomicAdd(&unique_comm_array[currComm[tail - base]-base], 1);
atomicAdd(&unique_weight_array[currComm[tail - base]-base],
graph_edgeList_weight[th_tail_indx]);
#endif
#endif
// printf("new vertex[%ld]; comm_array[%ld]; weight_array[%e] \n",
// i, currComm[tail - base],
// unique_weight_array[currComm[tail - base]]);
}
}
thb_g.sync();
// Make unique cluster vectors of comm and weights
for(GraphElem k = 0; k < ((max_comm_size*FINDING_UNIQCOMM_FACTOR-1)/thb_g.size()+1); k++) {
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < max_comm_size*FINDING_UNIQCOMM_FACTOR) {
if(unique_comm_array[thread_indx] != -1) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
if(active.thread_rank() == 0) {
#if __cuda_arch__ >= 600
index_loc = atomicAdd(&uniq_clus_vec[i], active.size());
#else
#ifndef USE_32_BIT_GRAPH
index_loc = my_func_atomicAdd(&uniq_clus_vec[i], active.size());
#else
index_loc = atomicAdd(&uniq_clus_vec[i], active.size());
#endif
#endif
// printf("vertex using distGetMaxIndex_large_new[%ld]; num_edges[%ld]; active.size[%d]\n"
// , i, num_edges, active.size());
}
active.sync();
// printf("new vertex[%ld]; thread_index[%ld]; shared_begin_loc[0][%ld] \n",
// i, thread_indx, shared_begin_loc[0]);
if (cc == thread_indx+base) {
counter[i] = unique_weight_array[thread_indx];
// printf("vertex using distGetMaxIndex_large_new[%ld]; counter[%e]\n", i, counter[i]);
}
clmap_comm[clmap_loc_i+active.shfl(index_loc, 0)+active.thread_rank()] =
thread_indx+base;
clmap_weight[clmap_loc_i+active.shfl(index_loc, 0)+active.thread_rank()] =
unique_weight_array[thread_indx];
// clmap_comm[clmap_loc_i+my_loc] = thread_indx;
// clmap_weight[clmap_loc_i+my_loc] = unique_weight_array[thread_indx];
// printf("new vertex[%ld]; clmap_comm[%ld]; clmap_weight[%e] \n",
// i, clmap_comm[shared_begin_loc[0]-active.size()+active.thread_rank()],
// clmap_weight[shared_begin_loc[0]-active.size()+active.thread_rank()]);
}
}
thb_g.sync();
}
thb_g.sync();
// if(thb_g.thread_rank() == 0)
// printf("me[%d]; vertex[%ld]; cc[%ld]; uniq_cls_vec_size[%ld]\n",
// me, i, cc, uniq_clus_vec[i]);
} // (num_edges > CUT_SIZE_NUM_EDGES1) loop
} // if(i >= nv)
} // if(i_index >= size_lt_cs2)
} // chunk size loop
}
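/// Older whole-thread-block variant (launched only under "#if 0" in the host
/// code) that builds and compacts the cluster map for vertices with more than
/// CUT_SIZE_NUM_EDGES1 edges by rescanning the edge list per unique community.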
template<int tile_sz>
__global__
void distGetMaxIndex_large(
GraphElem nv,
GraphElem* e0, GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base, const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_weight[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_clmap_loc[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_red_shared_weight[S_BLOCK_TILE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
t_shared_clmap_loc[ii] = clmap_loc[i];
}
/// Create cooperative groups
auto thb_g = cg::this_thread_block();
auto tileIdx = thb_g.thread_rank()/tile_sz;
GraphElem* shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight* shared_weight= &t_shared_weight[tileIdx * tile_sz];
GraphElem* shared_clmap_loc = &t_shared_clmap_loc[tileIdx * tile_sz];
GraphWeight *shared_red_weight = &t_red_shared_weight[tileIdx];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
// tile.sync();
thb_g.sync();
// GraphElem tcomm;
GraphElem Tail;
GraphElem num_edges;
/// Cater only to vertices with num of edges > CUT_SIZE_NUM_EDGES1:
/// the whole thread block works on one vertex, each thread on one edge
// for( int wii = 0; wii < tile.size(); wii++)
for( int wii = 0; wii < thb_g.size(); wii++) {
num_edges = t_shared_num_edges[wii];
if(num_edges > CUT_SIZE_NUM_EDGES1) {
// for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++)
for(int k = 0; k < ((num_edges-1)/thb_g.size()+1); k++) {
// GraphElem thread_indx = k*tile.size() + tile.thread_rank();
GraphElem thread_indx = k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx =
e0[blockIdx.x*blockDim.x+wii]+thread_indx;
Tail = graph_edgeList_tail[th_tail_indx];
clmap_comm[t_shared_clmap_loc[wii]+thread_indx] =
currComm[Tail - base];
clmap_weight[t_shared_clmap_loc[wii]+thread_indx] =
graph_edgeList_weight[th_tail_indx];
}
}
}
}
thb_g.sync();
/// Now find out unique clusters and accumulate weights
GraphElem cc;
// for( int wii = 0; wii < tile_sz; wii++)
for( int wii = 0; wii < thb_g.size(); wii++) {
num_edges = t_shared_num_edges[wii];
// if (blockIdx.x*blockDim.x+tileIdx*tile_sz+wii < nv)
if (blockIdx.x*blockDim.x+wii < nv)
// cc = currComm[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii];
cc = currComm[blockIdx.x*blockDim.x+wii];
GraphWeight tile_sum_Weight;
if(num_edges > CUT_SIZE_NUM_EDGES1) {
GraphElem store_indx = -1;
// if (tile.thread_rank() == 0)
// uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] = 0;
if (thb_g.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+wii] = 0;
for(GraphElem ko = 0; ko < num_edges; ko++) {
tile_sum_Weight = 0.0;
GraphElem comm_pos = clmap_comm[t_shared_clmap_loc[wii]+ko];
if(comm_pos != -1) {
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0) {
store_indx += 1;
clmap_comm[t_shared_clmap_loc[wii]+store_indx] = comm_pos;
}
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
clmap_weight[t_shared_clmap_loc[wii]+store_indx] =
clmap_weight[t_shared_clmap_loc[wii]+ko];
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+wii] += 1;
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0)
shared_red_weight[0] = 0.0;
t_shared_weight[thb_g.thread_rank()] = 0.0; // block-wide scratch, indexed by block thread rank
// for(GraphElem k = 0; k < ((num_edges-1)/tile.size()+1); k++)
for(GraphElem k = 0; k < ((num_edges-ko-1)/thb_g.size()+1); k++) {
GraphElem thread_indx = ko + 1 + k*thb_g.size() + thb_g.thread_rank();
if(thread_indx < num_edges) {
if (comm_pos == clmap_comm[t_shared_clmap_loc[wii]+thread_indx]) {
t_shared_weight[thb_g.thread_rank()] +=
clmap_weight[t_shared_clmap_loc[wii]+thread_indx];
clmap_comm[t_shared_clmap_loc[wii]+thread_indx] = -1;
}
}
}
// tile.sync();
thb_g.sync();
/// Perform reduction to accumulate weights
tile_sum_Weight = weight_reduce(thb_g, t_shared_weight,
t_shared_weight[thb_g.thread_rank()]);
// shared_weight[tile.thread_rank()]);
if(thb_g.thread_rank() ==0) shared_red_weight[0] += tile_sum_Weight;
// thb_g.sync();
// /// Perform reduction at threadblock level
// for (int s =1; s < S_BLOCK_TILE; s *=2)
// {
// int indx = 2 * s * thb_g.thread_rank();
// if(indx < S_BLOCK_TILE) {
// shared_red_weight[indx] = shared_red_weight[indx+s];
// }
// }
// thb_g.sync();
/// Add weights to cluster map
// if (tile.thread_rank() == 0)
if (thb_g.thread_rank() == 0) {
// clmap_weight[t_shared_clmap_loc[wii]+store_indx] += tile_sum_Weight;
clmap_weight[t_shared_clmap_loc[wii]+store_indx] +=
shared_red_weight[0];
// printf("vertex[%ld]; weight[%ld][%e]; ko[%ld] comm_pos[%ld]\n",
// blockIdx.x*blockDim.x+wii, t_shared_clmap_loc[wii]+store_indx,
// clmap_weight[t_shared_clmap_loc[wii]+store_indx], ko, comm_pos);
// if(comm_pos == cc) counter[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] =
// clmap_weight[shared_clmap_loc[wii]+store_indx];
if(comm_pos == cc) counter[blockIdx.x*blockDim.x+wii] =
clmap_weight[t_shared_clmap_loc[wii]+store_indx];
}
}
// tile.sync();
thb_g.sync();
}
}
} // end of old implementation
}
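/// Build per-vertex cluster maps (neighbour community -> accumulated edge
/// weight) and count the unique communities of each vertex: a tile cooperates
/// on vertices with tile_sz..CUT_SIZE_NUM_EDGES1 edges, low-degree vertices
/// are processed one per thread, and counter[] receives the weight towards
/// the vertex's current community.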
template<int tile_sz>
__global__
void distGetMaxIndex(
GraphElem nv,
GraphElem* e0, // GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* currComm,
GraphElem* clmap_loc,
GraphElem* clmap_comm, GraphWeight* clmap_weight,
GraphElem* List_numEdges,
GraphElem* uniq_clus_vec, GraphWeight* counter,
const GraphElem base // , const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_weight[S_THREADBLOCK_SIZE];
__shared__ GraphElem t_shared_clmap_loc[S_THREADBLOCK_SIZE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
t_shared_clmap_loc[ii] = clmap_loc[i];
}
/// Create cooperative groups
auto g = cg::this_thread_block();
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem* shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight* shared_weight= &t_shared_weight[tileIdx * tile_sz];
GraphElem* shared_clmap_loc = &t_shared_clmap_loc[tileIdx * tile_sz];
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
tile.sync();
// GraphElem tcomm;
GraphElem Tail;
GraphElem num_edges;
/// Cater only to vertices with tile_sz <= num of edges <= CUT_SIZE_NUM_EDGES1:
/// one tile per vertex, each thread works on one edge
for( int wii = 0; wii < tile_sz; wii++) {
num_edges = shared_num_edges[wii];
if(num_edges >= (GraphElem)tile_sz && num_edges <= CUT_SIZE_NUM_EDGES1) {
for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++) {
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
if(thread_indx < num_edges) {
GraphElem th_tail_indx =
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx;
Tail = graph_edgeList_tail[th_tail_indx];
clmap_comm[shared_clmap_loc[wii]+thread_indx] =
currComm[Tail - base];
clmap_weight[shared_clmap_loc[wii]+thread_indx] =
graph_edgeList_weight[th_tail_indx];
}
}
}
}
tile.sync();
/// Cater only to vertices with num of edges < tile_sz:
/// each thread works on one vertex
num_edges = shared_num_edges[tile.thread_rank()];
if(num_edges < (GraphElem)tile_sz && num_edges > 0 && i < nv) {
GraphElem edge_low = e0[i];
for (GraphElem j = 0; j < num_edges; j++) {
Tail = graph_edgeList_tail[edge_low+j];
clmap_comm[shared_clmap_loc[tile.thread_rank()]+j] = currComm[Tail - base];
clmap_weight[shared_clmap_loc[tile.thread_rank()]+j] = graph_edgeList_weight[edge_low+j];
}
}
/// Now find out unique clusters and accumulate weights
GraphElem cc;
for( int wii = 0; wii < tile_sz; wii++) {
num_edges = shared_num_edges[wii];
if (blockIdx.x*blockDim.x+tileIdx*tile_sz+wii < nv)
cc = currComm[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii];
GraphWeight tile_sum_Weight;
if(num_edges >= (GraphElem)tile_sz && num_edges <= CUT_SIZE_NUM_EDGES1) {
GraphElem store_indx = -1;
if (tile.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] = 0;
for(GraphElem ko = 0; ko < num_edges; ko++) {
tile_sum_Weight = 0.0;
GraphElem comm_pos = clmap_comm[shared_clmap_loc[wii]+ko];
if(comm_pos != -1) {
if (tile.thread_rank() == 0) {
store_indx += 1;
clmap_comm[shared_clmap_loc[wii]+store_indx] = comm_pos;
}
if (tile.thread_rank() == 0)
clmap_weight[shared_clmap_loc[wii]+store_indx] =
clmap_weight[shared_clmap_loc[wii]+ko];
if (tile.thread_rank() == 0)
uniq_clus_vec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] += 1;
for(GraphElem k = 0; k < ((num_edges-ko-1)/tile.size()+1); k++) {
GraphElem thread_indx = ko + 1 + k*tile.size() + tile.thread_rank();
shared_weight[tile.thread_rank()] = 0.0;
if(thread_indx < num_edges) {
if (comm_pos == clmap_comm[shared_clmap_loc[wii]+thread_indx]) {
shared_weight[tile.thread_rank()] =
clmap_weight[shared_clmap_loc[wii]+thread_indx];
clmap_comm[shared_clmap_loc[wii]+thread_indx] = -1;
}
}
tile.sync();
/// Perform reduction to accumulate weights
tile_sum_Weight += weight_reduce_sum_tile_shfl<tile_sz>
(tile, shared_weight[tile.thread_rank()]);
}
/// Add weights to cluster map
if (tile.thread_rank() == 0) {
clmap_weight[shared_clmap_loc[wii]+store_indx] += tile_sum_Weight;
if(comm_pos == cc) counter[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii] =
clmap_weight[shared_clmap_loc[wii]+store_indx];
}
}
tile.sync();
}
}
}
/// Repeat for vertices with num_edges < tile_sz (one vertex per thread)
// if( i < nv) return;
num_edges = shared_num_edges[tile.thread_rank()];
if(num_edges < (GraphElem)tile_sz && num_edges > 0 && i < nv) {
cc = currComm[i];
uniq_clus_vec[i] = 0;
GraphElem store_indx = -1;
int counter_switch = 1;
for(GraphElem ko = 0; ko < num_edges; ko++) {
GraphElem comm_pos = clmap_comm[shared_clmap_loc[tile.thread_rank()]+ko];
// GraphElem comm_count = 1;
if(comm_pos != -1) {
uniq_clus_vec[i] += 1;
store_indx += 1;
clmap_comm[shared_clmap_loc[tile.thread_rank()]+store_indx] = comm_pos;
clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx] =
clmap_weight[shared_clmap_loc[tile.thread_rank()]+ko];
for(GraphElem k = ko+1; k < num_edges; k++) {
if (comm_pos == clmap_comm[shared_clmap_loc[tile.thread_rank()]+k]) {
clmap_comm[shared_clmap_loc[tile.thread_rank()]+k] = -1;
clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx] +=
clmap_weight[shared_clmap_loc[tile.thread_rank()]+k];
}
}
if (comm_pos == cc && counter_switch == 1) {
counter_switch = 0;
counter[i] += clmap_weight[shared_clmap_loc[tile.thread_rank()]+store_indx];
}
}
}
}
}
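/// Compute the self-loop weight of every vertex (sum of the weights of edges
/// whose tail equals the vertex itself): tile-per-vertex for high-degree
/// vertices, thread-per-vertex otherwise.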
template<int tile_sz>
__global__
void distBuildLocalMapCounter(
/// Accumulate selfLoopVec for all vertices
GraphElem nv,
GraphElem* e0, GraphElem* e1,
GraphElem* graph_edgeList_tail, GraphWeight* graph_edgeList_weight,
GraphElem* List_numEdges, GraphWeight* selfLoopVec,
const GraphElem base // , const GraphElem bound
) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
__shared__ GraphElem t_shared_num_edges[S_THREADBLOCK_SIZE];
__shared__ GraphWeight t_shared_block_weight[S_THREADBLOCK_SIZE];
t_shared_num_edges[ii] = 0;
// if(i >= nv) return;
if(i < nv) {
t_shared_num_edges[ii] = List_numEdges[i];
}
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto g = cg::this_thread_block();
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem *shared_num_edges = &t_shared_num_edges[tileIdx * tile_sz];
GraphWeight *shared_block_weight = &t_shared_block_weight[tileIdx * tile_sz];
tile.sync();
/// Cater only to vertices with num of edges >= tile_sz: one tile works on
/// each vertex. This implementation uses cooperative groups with a large
/// thread block size (e.g. 128) to increase occupancy.
for( int wii = 0; wii < tile_sz; wii++) {
GraphElem num_edges = shared_num_edges[wii];
shared_block_weight[tile.thread_rank()] = 0.0;
if(num_edges >= (GraphElem)tile_sz) {
for(int k = 0; k < ((num_edges-1)/tile.size()+1); k++) {
GraphElem thread_indx = k*tile.size() + tile.thread_rank();
if(thread_indx < num_edges) {
if(graph_edgeList_tail[
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx] ==
blockIdx.x * blockDim.x + tileIdx*tile_sz+wii + base)
shared_block_weight[tile.thread_rank()] +=
graph_edgeList_weight[
e0[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii]+thread_indx];
}
}
tile.sync();
GraphWeight tile_sum_Weight = weight_reduce_sum_tile_shfl<tile_sz>
(tile, shared_block_weight[tile.thread_rank()]);
if (tile.thread_rank() == 0)
#if __cuda_arch__ >= 600
atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#else
#ifndef USE_32_BIT_GRAPH
my_func_atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#else
atomicAdd(&selfLoopVec[blockIdx.x*blockDim.x+tileIdx*tile_sz+wii], tile_sum_Weight);
#endif
#endif
}
}
/// Cater only to vertices with num of edges < tile_sz: one thread per vertex
if(i >= nv) return;
GraphWeight selfLoop = 0;
if(shared_num_edges[tile.thread_rank()] < (GraphElem)tile_sz) {
for (GraphElem j = e0[i]; j < e1[i]; j++) {
if(graph_edgeList_tail[j] == i + base)
selfLoop += graph_edgeList_weight[j];
}
selfLoopVec[i] = selfLoop;
}
}
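/// Per-vertex bookkeeping pass: store edge counts, reserve space in the
/// global cluster-map arrays (clmap_loc / size_clmap), and bucket vertices
/// by degree into the lt_ts, lt_cs1 and lt_cs2 lists.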
template<int tile_sz>
__global__
void count_size_clmap (GraphElem nv, GraphElem* NumClusters,
GraphElem* clmap_loc, GraphElem* size_clmap,
GraphElem* size_lt_ts, GraphElem* list_lt_ts,
GraphElem* size_lt_cs1, GraphElem* list_lt_cs1,
GraphElem* size_lt_cs2, GraphElem* list_lt_cs2,
GraphElem* e0, GraphElem* e1, GraphElem* List_numEdges) {
int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
#ifndef USE_32_BIT_GRAPH
__shared__ long shared_mem_size[MS_THREADBLOCK_SIZE];
__shared__ long shared_begin_loc[MS_BLOCK_TILE];
#else
__shared__ int shared_mem_size[MS_THREADBLOCK_SIZE];
__shared__ int shared_begin_loc[MS_BLOCK_TILE];
#endif
shared_mem_size[ii] = 0;
GraphElem numEdges = 0;
#ifdef DEBUG_CUVITE
int my_mem_block = 0;
#endif
// GraphElem numEdges;
if(i < nv) {
numEdges = e1[i] - e0[i];
List_numEdges[i] = numEdges;
// if(numEdges >= 90 && numEdges < 96)
// printf("vertex[%ld]; numEdges[%ld]\n",i, numEdges);
#ifdef DEBUG_CUVITE
if(numEdges > 0) my_mem_block = 1;
#endif
// printf("vertex[%ld]; num_edges[%d] \n", i, numEdges);
// shared_mem_size[ii] = numEdges * sizeof(GraphElem);
shared_mem_size[ii] = (GraphElem) numEdges;
// printf("vertex[%ld]; shared_mem_size[%d] \n", i, shared_mem_size[ii]);
}
auto g = cg::this_thread_block();
#if __cuda_arch__ >= 700
auto tile = cg::partition<tile_sz>(cg::this_thread_block());
#else
auto tile = cg::tiled_partition<tile_sz>(cg::this_thread_block());
#endif
auto tileIdx = g.thread_rank()/tile_sz;
GraphElem *t_shared_mem_size = &shared_mem_size[tileIdx * tile_sz];
GraphElem *t_shared_begin_loc = &shared_begin_loc[tileIdx];
tile.sync();
/// Mem loc inside the block
GraphElem my_mem_loc = 0;
for(int s = 0; s < tile.size(); s++) {
if(tile.thread_rank() > s) my_mem_loc += t_shared_mem_size[s];
}
tile.sync();
#if 1
/// Accumulate number of clusters and mem requirement
for(int s = 1; s < tile.size(); s *= 2) {
int indx = 2 * s * tile.thread_rank();
if(indx < tile.size()) {
t_shared_mem_size[indx] += t_shared_mem_size[indx+s];
}
tile.sync();
}
#else
GraphElem tile_sum_size = reduce_sum_tile_shfl<tile_sz>
(tile, t_shared_mem_size[tile.thread_rank()]);
#endif
#ifdef DEBUG_CUVITE
int tile_sum_mem = reduce_sum_tile_shfl<tile_sz>(tile, my_mem_block);
#endif
if(tile.thread_rank() == 0) {
// printf("shared_mem_block[%d]; shared_mem_size[%ld]; blockIdx.x[%d] \n",
// tile_sum_mem, shared_mem_size[0], blockIdx.x);
// printf("shared_mem_block[%d]; shared_mem_size[%ld]; blockIdx.x[%d] \n",
// tile_sum_mem, tile_sum_size, blockIdx.x);
#if __cuda_arch__ >= 600
#ifdef DEBUG_CUVITE
atomicAdd(&NumClusters[0], tile_sum_mem);
#endif
t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
// t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], tile_sum_size);
#else
#ifdef DEBUG_CUVITE
my_func_atomicAdd(&NumClusters[0], tile_sum_mem);
#endif
#ifndef USE_32_BIT_GRAPH
t_shared_begin_loc[0] = my_func_atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
#else
t_shared_begin_loc[0] = atomicAdd(&size_clmap[0], t_shared_mem_size[0]);
#endif
#endif
}
tile.sync();
#ifdef DEBUG_CUVITE
// if(i == 0) printf("Number of edges in a block[%ld]\n", NumClusters[0]);
#endif
if(i >= nv) return;
if(numEdges > 0) {
clmap_loc[i] = t_shared_begin_loc[0] + my_mem_loc;
// printf("vertex[%ld]; shared_begin_loc0[%d]; numEdges[%ld]; my_mem_loc[%ld]; clmap_loc[%ld] \n",
// i, t_shared_begin_loc[0], numEdges, my_mem_loc, clmap_loc[i]);
} else {
clmap_loc[i] = -1;
}
/// Group vertices based on degree
tile.sync();
if(numEdges <= (GraphElem)tile_sz) {
cg::coalesced_group active = cg::coalesced_threads();
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = atomicAdd(&size_lt_ts[0], active.size());
#else
#ifndef USE_32_BIT_GRAPH
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = my_func_atomicAdd(&size_lt_ts[0], active.size());
#else
if(active.thread_rank() == 0)
t_shared_begin_loc[0] = atomicAdd(&size_lt_ts[0], active.size());
#endif
#endif
active.sync();
list_lt_ts[t_shared_begin_loc[0]+active.thread_rank()] = i;
}
tile.sync();
if(numEdges > (GraphElem)tile_sz && numEdges <= CUT_SIZE_NUM_EDGES1) {
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs1[0], active.size());
#else
if(active.thread_rank() == 0)
#ifndef USE_32_BIT_GRAPH
index_loc = my_func_atomicAdd(&size_lt_cs1[0], active.size());
#else
index_loc = atomicAdd(&size_lt_cs1[0], active.size());
#endif
#endif
// active.sync();
list_lt_cs1[active.shfl(index_loc, 0)+active.thread_rank()] = i;
}
tile.sync();
if(numEdges > CUT_SIZE_NUM_EDGES1) {
// printf("vertex[%ld]; lt_cs2_num_edges[%ld] \n", i, numEdges);
cg::coalesced_group active = cg::coalesced_threads();
GraphElem index_loc;
#if __cuda_arch__ >= 600
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs2[0], active.size());
#else
#ifndef USE_32_BIT_GRAPH
if(active.thread_rank() == 0)
index_loc = my_func_atomicAdd(&size_lt_cs2[0], active.size());
#else
if(active.thread_rank() == 0)
index_loc = atomicAdd(&size_lt_cs2[0], active.size());
#endif
#endif
// active.sync();
list_lt_cs2[active.shfl(index_loc, 0)+active.thread_rank()] = i;
}
}
__global__
void gpu_distExecuteLouvainIteration(
const GraphElem nv,
GraphElem* graph_edgeListIndexes,
GraphElem* GraphEdge_low, GraphElem* GraphEdge_high,
int me, const GraphElem base, const GraphElem bound
) {
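/// Cache each local vertex's CSR edge range (begin/end offsets) into GraphEdge_low/GraphEdge_high for reuse by the later kernels.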
// int ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= nv) return;
GraphElem e0, e1;
e0 = graph_edgeListIndexes[i];
e1 = graph_edgeListIndexes[i+1];
/// Store variables to global memory
GraphEdge_low[i] = e0;
GraphEdge_high[i] = e1;
}
template<class T>
__global__ void print_device_vector(T *given_vec, GraphElem size_vec)
{
GraphElem ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= size_vec) return;
printf("i[%ld]; VEC_VALUE[%f]\n", i, given_vec[i]);
}
__global__ void print_device_vector2(GraphWeight* given_vec,
GraphElem size_vec)
{
// GraphElem ii = threadIdx.x;
GraphElem i = blockIdx.x * blockDim.x + threadIdx.x ;
if(i >= size_vec) return;
printf("i[%ld]; VEC_VALUE[%f]\n", i, given_vec[i]);
}
void set_gpuDevices(int *me)
{
int num_gpuDevices;
cudaGetDeviceCount(&num_gpuDevices);
#if 1
/// split MPI comm to get local node rank
/// cudaSetDevice to local node rank
MPI_Comm loc_comm;
MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, *me,
MPI_INFO_NULL, &loc_comm);
int node_rank = -1;
MPI_Comm_rank(loc_comm,&node_rank);
// std::cout << "me:" << *me << "; node rank:" << node_rank << std::endl;
cudaError_t cudaStat;
cudaStat = cudaSetDevice(node_rank);
for (int dev_id = 0; dev_id < num_gpuDevices; dev_id++) {
if( node_rank%num_gpuDevices == dev_id) {
cudaStat = cudaSetDevice(dev_id);
// std::cout << "me[" << *me << "]; node rank[" << node_rank
// << "]; dev_id[" << dev_id << "]" << std::endl;
}
}
if(cudaStat != cudaSuccess)
printf("Process %d; ERROR DEVICE FAILED\n", *me);
MPI_Comm_free(&loc_comm);
#else
/// cudaSetDevice to MPI rank
cudaError_t cudaStat;
for (int dev_id = 0; dev_id < num_gpuDevices; dev_id++) {
if( *me%num_gpuDevices == dev_id) cudaStat = cudaSetDevice(dev_id);
}
// cudaStat = cudaSetDevice(2);
if(cudaStat != cudaSuccess)
printf("Process %d; ERROR DEVICE FAILED\n", *me);
#endif
}
int gpu_for_louvain_iteration(
const GraphElem nv, const DistGraph &dg,
CommunityVector &currComm,
CommunityVector &targetComm,
GraphWeightVector &vDegree,
CommVector &localCinfo,
CommVector &localCupdate,
VertexCommMap &remoteComm,
const CommMap &remoteCinfo,
CommMap &remoteCupdate,
const double constantForSecondTerm,
GraphWeightVector &clusterWeight,
int me, int numIters, GpuGraph &gpu_graph)
{
if(nv <= 0) return 1;
const GraphElem base = dg.getBase(me), bound = dg.getBound(me);
#ifdef USE_HYBRID_CPU_GPU // Run hybrid CPU-GPU code
// create a temporary target buffer
std::vector<GraphElem> temp_targetComm_cpu = targetComm;
std::vector<GraphElem> temp_targetComm_gpu = targetComm;
static GraphElem num_vertex_cpu, num_vertex_gpu;
static double time_cpu, time_gpu;
if(numIters == 1)
{
time_cpu = 1.e0;
time_gpu = 1.e0;
}
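/// Feedback load balancing: each iteration shifts roughly a third of the relative CPU/GPU timing gap's worth of vertices toward whichever side finished faster last time.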
if(time_cpu >= time_gpu){
num_vertex_cpu = num_vertex_cpu -
nv * (time_cpu - time_gpu) / (time_cpu + time_gpu) / 3;
}
if(time_cpu < time_gpu){
num_vertex_cpu = num_vertex_cpu +
nv * (time_gpu - time_cpu) / (time_cpu + time_gpu) / 3;
}
if(num_vertex_cpu <= 0) num_vertex_cpu = nv * 1/80;
if(num_vertex_cpu > nv ) num_vertex_cpu = nv * 9/10;
// if(numIters == 1) num_vertex_cpu = nv * 1/20;
if(numIters == 1) num_vertex_cpu = nv * 1/3;
// num_vertex_cpu = 0;
num_vertex_gpu = nv - num_vertex_cpu;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; nv: " << nv << "; num_vertex_gpu: " <<
num_vertex_gpu << "; num_vertex_cpu: " << num_vertex_cpu << std::endl;
#endif
int num_avail_threads = omp_get_num_threads();
const int maxNumThreads = omp_get_max_threads();
omp_set_num_threads(2);
omp_set_nested(1);
double t0 = timer();
#pragma omp parallel sections
{
#pragma omp section
{ //call CPU function
omp_set_num_threads(8);
#pragma omp parallel default(none), shared(clusterWeight, localCupdate, currComm, targetComm, \
vDegree, localCinfo, remoteCinfo, remoteComm, dg, remoteCupdate, me, \
temp_targetComm_cpu, num_vertex_gpu), \
firstprivate(constantForSecondTerm)
{
// distCleanCWandCU(nv, clusterWeight, localCupdate);
#ifdef OMP_SCHEDULE_RUNTIME
#pragma omp for schedule(runtime)
#else
#pragma omp for schedule(guided)
#endif
for (GraphElem i = num_vertex_gpu; i < nv; i++) {
distExecuteLouvainIteration_hybrid(i, dg, currComm, targetComm, vDegree, localCinfo,
localCupdate, remoteComm, remoteCinfo, remoteCupdate,
constantForSecondTerm, clusterWeight, me, temp_targetComm_cpu);
}
}
double t1 = timer();
time_cpu = t1 - t0;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; Time CPU: " << time_cpu << std::endl;
#endif
} // close cpu section
#pragma omp section
{ /// call GPU function
omp_set_num_threads(4);
if (num_vertex_gpu > 0) {
/// size equal to nv X GraphElem
int mem_size_GraphElem_nv = sizeof(GraphElem) * nv;
/// size equal to nv X GraphWeight
int mem_size_GraphWeight_nv = sizeof(GraphWeight) * nv;
/// All following have size = nv
GraphElem size_localCinfo = localCinfo.size();
GraphElem* temp_ModlocalCinfo_size = gpu_graph.getPinned_ModlocalCinfo_size();
GraphWeight* temp_ModlocalCinfo_degree = gpu_graph.getPinned_ModlocalCinfo_degree();
#pragma omp parallel default(none), \
shared(localCinfo, temp_ModlocalCinfo_size, temp_ModlocalCinfo_degree)
#pragma omp for schedule(guided)
for(int ii=0; ii<localCinfo.size(); ii++) {
temp_ModlocalCinfo_size[ii] = localCinfo[ii].size;
temp_ModlocalCinfo_degree[ii] = localCinfo[ii].degree;
}
/// Remote Community Info
/// First get the keys of remoteCinfo map
std::vector<GraphElem> temp_remoteCinfo_key = extract_keys_CommMap(remoteCinfo);
GraphElem size_remoteCinfo = remoteCinfo.size();
/// split RemoteCinfo into vectors for different struct elements
std::vector<GraphElem>temp_remoteCinfo_size =
extract_value_CommMap_size(remoteCinfo);
std::vector<GraphWeight>temp_remoteCinfo_degree =
extract_value_CommMap_degree(remoteCinfo);
/// now modify currComm to include remoteComm
GraphElem* temp_ModlocalCinfo_oComm = gpu_graph.getPinned_ModlocalCinfo_oComm();
ClusterLocalMap localCinfo_to_remoteCinfo_map;
ClusterLocalMap::const_iterator storedAlready;
GraphElem temp_counter_01 = 0;
std::vector<GraphElem> ModcurrComm = currComm;
for(int ii=0; ii<temp_remoteCinfo_key.size(); ii++) {
GraphElem temp_Comm = temp_remoteCinfo_key[ii];
if(temp_Comm < base || temp_Comm >= bound)
{
storedAlready = localCinfo_to_remoteCinfo_map.find(temp_Comm);
if(storedAlready == localCinfo_to_remoteCinfo_map.end()) {
localCinfo_to_remoteCinfo_map.insert(std::make_pair(
temp_Comm, (temp_counter_01+bound)));
temp_ModlocalCinfo_size[size_localCinfo+temp_counter_01] = temp_remoteCinfo_size[ii];
temp_ModlocalCinfo_degree[size_localCinfo+temp_counter_01] = temp_remoteCinfo_degree[ii];
temp_ModlocalCinfo_oComm[temp_counter_01] = temp_Comm;
temp_counter_01++;
}
}
}
GraphElem size_ModlocalCinfo = size_localCinfo+temp_counter_01;
GraphElem size_ModlocalCinfo_oComm = temp_counter_01;
std::vector<GraphElem>().swap(temp_remoteCinfo_key);
CommunityVector().swap(temp_remoteCinfo_size);
GraphWeightVector().swap(temp_remoteCinfo_degree);
// remoteComm is broken into 2 arrays
std::vector<GraphElem> temp_remoteComm_v;
temp_remoteComm_v = extract_vertex_VertexCommMap(remoteComm);
std::vector<GraphElem> temp_remoteComm_comm;
temp_remoteComm_comm = extract_comm_VertexCommMap(remoteComm);
/// Create map for remoteComm tail mapped to currComm
ClusterLocalMap remoteComm_to_currComm_map_v;
ClusterLocalMap::const_iterator storedAlready_v;
ClusterLocalMap::const_iterator storedAlready_comm;
GraphElem temp_counter_02 = 0;
GraphElem temp_tail;
GraphElem temp_comm, temp_comm_mapped;
// First modify currComm
#pragma omp parallel default(none), \
shared(ModcurrComm, localCinfo_to_remoteCinfo_map), \
private(temp_comm, storedAlready_comm)
#pragma omp for schedule(guided)
for(int ii = 0; ii < ModcurrComm.size(); ii++) {
temp_comm = ModcurrComm[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
ModcurrComm[ii] = storedAlready_comm->second;
}
}
// Next modify currComm to include remoteComm
for(int ii=0; ii<temp_remoteComm_comm.size(); ii++) {
temp_comm = temp_remoteComm_comm[ii];
temp_tail = temp_remoteComm_v[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
temp_comm_mapped = storedAlready_comm->second;
temp_remoteComm_comm[ii] = temp_comm_mapped;
ModcurrComm.push_back(temp_comm_mapped);
} else {
ModcurrComm.push_back(temp_comm);
/// check line below
}
if(temp_tail < base || temp_tail >= bound) {
storedAlready_v = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready_v == remoteComm_to_currComm_map_v.end()) {
if(temp_tail < base || temp_tail >= bound) {
remoteComm_to_currComm_map_v.insert(std::make_pair(
temp_tail, (bound + temp_counter_02) ));
temp_remoteComm_v[ii] = bound + temp_counter_02;
}
temp_counter_02++;
}
}
}
// }
std::vector<GraphElem>().swap(temp_remoteComm_v);
std::vector<GraphElem>().swap(temp_remoteComm_comm);
// comm_node_info remote_comm_info;
const Graph &g = dg.getLocalGraph();
GraphElem size_edgeListIndexes = g.edgeListIndexes.size();
GraphElem* temp_graph_edgeList_tail = gpu_graph.getPinned_edgeList_tail();
GraphWeight* temp_graph_edgeList_weight = gpu_graph.getPinned_edgeList_weight();
#pragma omp parallel default(none), shared(g, temp_graph_edgeList_tail, \
temp_graph_edgeList_weight, remoteComm_to_currComm_map_v), \
private(storedAlready)
#pragma omp for schedule(guided)
for(int ii=0; ii<g.edgeList.size(); ii++) {
ClusterLocalMap edgeList_tail_map;
GraphElem temp_tail = g.edgeList[ii].tail;
temp_graph_edgeList_tail[ii] = temp_tail;
temp_graph_edgeList_weight[ii] = g.edgeList[ii].weight;
if(temp_tail < base || temp_tail >= bound) {
/// use remoteComm_to_currComm_map_v map instead
storedAlready = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready != edgeList_tail_map.end()) {
temp_graph_edgeList_tail[ii] = storedAlready->second;
}
}
}
#ifdef PRINT_TIMEDS
double t_remap = timer();
double time_remap = t_remap - t0;
std::cout << "me[" << me << "]; Time gpu_remap: " << time_remap << std::endl;
#endif
/// Get pointers to memory of device arrays
GraphElem* dev_currComm = gpu_graph.get_currComm();
GraphElem* dev_ModlocalTarget = gpu_graph.get_ModlocalTarget();
GraphWeight* dev_vDegree = gpu_graph.get_vDegree();
GraphWeight* dev_clusterWeight = gpu_graph.get_clusterWeight();
GraphElem* dev_edgeListIndexes = gpu_graph.get_edgeListIndexes();
GraphElem* dev_ModcurrComm = gpu_graph.get_ModcurrComm();
GraphElem* dev_localCinfo_size = gpu_graph.get_ModlocalCinfo_size();
GraphWeight* dev_localCinfo_degree = gpu_graph.get_ModlocalCinfo_degree();
GraphElem* dev_localCinfo_oComm = gpu_graph.get_ModlocalCinfo_oComm();
GraphElem* dev_graph_edgeList_tail = gpu_graph.get_edgeList_tail();
GraphWeight* dev_graph_edgeList_weight = gpu_graph.get_edgeList_weight();
GraphElem* dev_unique_comm_array = gpu_graph.get_unique_comm_array();
GraphWeight* dev_unique_weight_array = gpu_graph.get_unique_weight_array();
gpu_graph.cpyVecTodev(currComm, dev_currComm);
gpu_graph.cpyVecTodev(vDegree, dev_vDegree);
gpu_graph.cpyVecTodev(clusterWeight, dev_clusterWeight);
gpu_graph.cpyVecTodev(g.edgeListIndexes, dev_edgeListIndexes);
bool check_ModlocalCinfo_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo);
assert(check_ModlocalCinfo_memory);
bool check_ModlocalCinfoComm_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo_oComm);
assert(check_ModlocalCinfoComm_memory);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_size, dev_localCinfo_size, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_degree, dev_localCinfo_degree, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_oComm, dev_localCinfo_oComm, size_ModlocalCinfo_oComm);
gpu_graph.cpyArrayTodev(temp_graph_edgeList_tail, dev_graph_edgeList_tail,
(GraphElem)g.edgeList.size());
gpu_graph.cpyArrayTodev(temp_graph_edgeList_weight, dev_graph_edgeList_weight,
(GraphElem)g.edgeList.size());
bool check_ModcurrComm_memory = gpu_graph.checkModCommMemory(
(GraphElem)ModcurrComm.size());
assert(check_ModcurrComm_memory);
gpu_graph.cpyVecTodev(ModcurrComm, dev_ModcurrComm);
GraphElem* dev_GraphEdge_low = gpu_graph.get_GraphEdge_low();
GraphElem* dev_GraphEdge_high = gpu_graph.get_GraphEdge_high();
/// allocate device memory for filling in comm and weights
GraphElem* dev_clmap_comm = gpu_graph.get_clmap_comm();
GraphWeight* dev_clmap_weight = gpu_graph.get_clmap_weight();
GraphElem clmapSize;
GraphElem* dev_clmap_loc = gpu_graph.get_clmap_loc();
GraphElem* dev_List_numEdges = gpu_graph.get_List_numEdges();
GraphElem* dev_list_lt_ts = gpu_graph.get_dev_list_lt_ts();
GraphElem* dev_list_lt_cs1 = gpu_graph.get_dev_list_lt_cs1();
GraphElem* dev_list_lt_cs2 = gpu_graph.get_dev_list_lt_cs2();
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_dtrans = timer();
double time_dtrans = t_dtrans - t0;
std::cout << "me[" << me << "]; Time gpu_dtrans: " << time_dtrans << std::endl;
#endif
if(numIters == 1)
{
CUDA_SAFE(cudaMemset(dev_GraphEdge_low, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_GraphEdge_high, 0, mem_size_GraphElem_nv));
dim3 numBlocks01( (nv-1) / L_THREADBLOCK_SIZE + 1);
dim3 Block_dim01(L_THREADBLOCK_SIZE);
gpu_distExecuteLouvainIteration<<<numBlocks01,Block_dim01>>>(
nv,
dev_edgeListIndexes,
dev_GraphEdge_low, dev_GraphEdge_high,
me, base, bound);
CUDA_SAFE(cudaMemset(dev_List_numEdges, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_clmap_loc, 0, mem_size_GraphElem_nv));
GraphElem* dev_NumClusters = gpu_graph.get_NumClusters();
CUDA_SAFE(cudaMemset(dev_NumClusters, 0, sizeof(GraphElem)));
GraphElem* dev_size_clmap = gpu_graph.get_size_clmap();
CUDA_SAFE(cudaMemset(dev_size_clmap, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_ts = gpu_graph.get_dev_size_lt_ts();
CUDA_SAFE(cudaMemset(dev_size_lt_ts, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs1 = gpu_graph.get_dev_size_lt_cs1();
CUDA_SAFE(cudaMemset(dev_size_lt_cs1, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs2 = gpu_graph.get_dev_size_lt_cs2();
CUDA_SAFE(cudaMemset(dev_size_lt_cs2, 0, sizeof(GraphElem)));
CUDA_SAFE(cudaMemset(dev_list_lt_ts, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_list_lt_cs1, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_list_lt_cs2, 0, mem_size_GraphElem_nv));
dim3 numBlocks02( (nv-1) / MS_THREADBLOCK_SIZE + 1);
dim3 Block_dim02(MS_THREADBLOCK_SIZE);
count_size_clmap<PHY_WRP_SZ><<<numBlocks02,Block_dim02>>>(nv, dev_NumClusters,
dev_clmap_loc, dev_size_clmap,
dev_size_lt_ts, dev_list_lt_ts,
dev_size_lt_cs1, dev_list_lt_cs1,
dev_size_lt_cs2, dev_list_lt_cs2,
dev_GraphEdge_low, dev_GraphEdge_high, dev_List_numEdges);
/// copy to host number of clusters and size of cluster map memory
#ifdef DEBUG_CUVITE
GraphElem NumClusters = 0;
CUDA_SAFE(cudaMemcpy(&NumClusters, dev_NumClusters,
sizeof(GraphElem), cudaMemcpyDeviceToHost));
std::cout << "me[" << me << "]; NumClusters[ " << NumClusters << "]" << std::endl;
#endif
CUDA_SAFE(cudaMemcpy(&clmapSize, dev_size_clmap,
sizeof(GraphElem), cudaMemcpyDeviceToHost));
gpu_graph.set_clmapSize(clmapSize);
dev_clmap_comm = gpu_graph.getDevMem_clmapComm(clmapSize);
dev_clmap_weight = gpu_graph.getDevMem_clmapWeight(clmapSize);
gpu_graph.set_size_lt_ts();
gpu_graph.set_size_lt_cs1();
gpu_graph.set_size_lt_cs2();
}
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_iter1 = timer();
double time_iter1 = t_iter1 - t0;
std::cout << "me[" << me << "]; Time gpu_iter1: " << time_iter1 << std::endl;
#endif
GraphElem size_lt_ts = gpu_graph.get_size_lt_ts();
GraphElem size_lt_cs1 = gpu_graph.get_size_lt_cs1();
GraphElem size_lt_cs2 = gpu_graph.get_size_lt_cs2();
clmapSize = gpu_graph.get_clmapSize();
CUDA_SAFE(cudaMemset(dev_clmap_comm, 0,
clmapSize * sizeof(GraphElem)));
CUDA_SAFE(cudaMemset(dev_clmap_weight, 0,
clmapSize * sizeof(GraphWeight)));
GraphWeight* dev_selfLoopVec = gpu_graph.get_selfLoopVec();
CUDA_SAFE(cudaMemset(dev_selfLoopVec, 0, mem_size_GraphWeight_nv));
dim3 numBlocks03( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim03(S_THREADBLOCK_SIZE);
distBuildLocalMapCounter<PHY_WRP_SZ><<<numBlocks03,Block_dim03>>>(
num_vertex_gpu, dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_List_numEdges, dev_selfLoopVec,
base); // , bound);
GraphElem* dev_uniq_clus_vec = gpu_graph.get_uniq_clus_vec();
CUDA_SAFE(cudaMemset(dev_uniq_clus_vec, 0, mem_size_GraphElem_nv));
GraphWeight* dev_counter = gpu_graph.get_counter();
CUDA_SAFE(cudaMemset(dev_counter, 0, mem_size_GraphWeight_nv));
const int num_streams = 2;
cudaStream_t streams[num_streams];
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
CUDA_SAFE(cudaStreamCreate(&streams[i_streams]) );
}
dim3 numBlocks05( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim05(S_THREADBLOCK_SIZE);
distGetMaxIndex<PHY_WRP_SZ><<<numBlocks05,Block_dim05, 0, streams[0]>>>(
num_vertex_gpu,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
#if 0
distGetMaxIndex_large<PHY_WRP_SZ><<<numBlocks05,Block_dim05, 0, streams[0]>>>(
num_vertex_gpu,
dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base, bound);
#else
if(size_lt_cs2 > 0) {
dim3 numBlocks052(FINDING_UNIQCOMM_NUM_BLOCKS);
dim3 Block_dim052(FINDING_UNIQCOMM_BLOCK_SIZE);
GraphElem nv_chunk_size;
nv_chunk_size = (size_lt_cs2 - 1) / FINDING_UNIQCOMM_NUM_BLOCKS + 1;
assert(ModcurrComm.size() <= FINDING_UNIQCOMM_ARRAY_SIZE);
distGetMaxIndex_large_new<PHY_WRP_SZ><<<numBlocks052,Block_dim052, 0, streams[0]>>>(
me, numIters,
num_vertex_gpu, nv_chunk_size,
size_lt_cs2, dev_list_lt_cs2,
ModcurrComm.size(), // size_ModlocalCinfo,
dev_unique_comm_array,
dev_unique_weight_array,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
}
#endif
CUDA_SAFE(cudaMemcpy(dev_ModlocalTarget, dev_ModcurrComm,
sizeof(GraphElem)*ModcurrComm.size(), cudaMemcpyDeviceToDevice));
dim3 numBlocks06( (num_vertex_gpu-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim06(S_THREADBLOCK_SIZE);
computeMaxIndex<PHY_WRP_SZ><<<numBlocks06,Block_dim06, 0, streams[0]>>>(
// nv,
num_vertex_gpu,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
computeMaxIndex_large<PHY_WRP_SZ><<<numBlocks06,Block_dim06, 0, streams[0]>>>(
// nv,
num_vertex_gpu,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kernels = timer();
double time_kernels = t_kernels - t0;
std::cout << "me[" << me << "]; Time gpu_kernels: " << time_iter1 << std::endl;
#endif
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
cudaStreamSynchronize(streams[i_streams]);
}
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
cudaStreamDestroy(streams[i_streams]);
}
/// Copy Targets to Host
CUDA_SAFE(cudaMemcpy(&temp_targetComm_gpu[0],
dev_ModlocalTarget,
(num_vertex_gpu*sizeof(GraphElem)), cudaMemcpyDeviceToHost));
/// Copy clusterWeight to Host
CUDA_SAFE(cudaMemcpy(&clusterWeight[0],
dev_clusterWeight,
(num_vertex_gpu*sizeof(GraphWeight)), cudaMemcpyDeviceToHost));
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kd2h = timer();
double time_kd2h = t_kd2h - t0;
std::cout << "me[" << me << "]; Time gpu_kd2h: " << time_kd2h << std::endl;
#endif
std::vector<GraphElem>().swap(ModcurrComm);
double t1 = timer();
time_gpu = t1 - t0;
#ifdef PRINT_HYBRID
std::cout << "me[" << me << "]; Time GPU: " << time_gpu << std::endl;
#endif
} // if (num_vertex_gpu > 0) condition
} // close gpu secton
} // close parallel
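/// Merge the two halves: GPU results already occupy [0, num_vertex_gpu); copy the CPU results into [num_vertex_gpu, nv).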
memcpy(&temp_targetComm_gpu[0+num_vertex_gpu],
&temp_targetComm_cpu[0+num_vertex_gpu],
num_vertex_cpu*sizeof(GraphElem));
omp_set_num_threads(14);
updateLocalTarget_gpu (
nv,
currComm,
targetComm,
vDegree,
remoteCupdate,
temp_targetComm_gpu,
localCupdate,
base, bound, numIters);
std::vector<GraphElem>().swap(temp_targetComm_cpu);
std::vector<GraphElem>().swap(temp_targetComm_gpu);
#else // else option runs GPU code below
omp_set_num_threads(14);
double t0 = timer();
// GPU only code
/// size equal to nv X GraphElem
int mem_size_GraphElem_nv = sizeof(GraphElem) * nv;
/// size equal to nv X GraphWeight
int mem_size_GraphWeight_nv = sizeof(GraphWeight) * nv;
GraphElem size_localCinfo = localCinfo.size();
/// split localCinfo into vectors for different struct elements
GraphElem* temp_ModlocalCinfo_size = gpu_graph.getPinned_ModlocalCinfo_size();
GraphWeight* temp_ModlocalCinfo_degree = gpu_graph.getPinned_ModlocalCinfo_degree();
#pragma omp parallel default(none), \
shared(localCinfo, temp_ModlocalCinfo_size, temp_ModlocalCinfo_degree)
#pragma omp for schedule(guided)
for(int ii=0; ii<localCinfo.size(); ii++) {
temp_ModlocalCinfo_size[ii] = localCinfo[ii].size;
temp_ModlocalCinfo_degree[ii] = localCinfo[ii].degree;
}
/// Remote Community Info
/// First get the keys of remoteCinfo map
std::vector<GraphElem> temp_remoteCinfo_key = extract_keys_CommMap(remoteCinfo);
GraphElem size_remoteCinfo = remoteCinfo.size();
/// split RemoteCinfo into vectors for different struct elements
std::vector<GraphElem>temp_remoteCinfo_size =
extract_value_CommMap_size(remoteCinfo);
std::vector<GraphWeight>temp_remoteCinfo_degree =
extract_value_CommMap_degree(remoteCinfo);
/// now modify currComm to include remoteComm
GraphElem* temp_ModlocalCinfo_oComm = gpu_graph.getPinned_ModlocalCinfo_oComm();
ClusterLocalMap localCinfo_to_remoteCinfo_map;
ClusterLocalMap::const_iterator storedAlready;
GraphElem temp_counter_01 = 0;
std::vector<GraphElem> ModcurrComm = currComm;
for(int ii=0; ii<temp_remoteCinfo_key.size(); ii++) {
GraphElem temp_Comm = temp_remoteCinfo_key[ii];
if(temp_Comm < base || temp_Comm >= bound)
{
storedAlready = localCinfo_to_remoteCinfo_map.find(temp_Comm);
if(storedAlready == localCinfo_to_remoteCinfo_map.end()) {
localCinfo_to_remoteCinfo_map.insert(std::make_pair(
temp_Comm, (temp_counter_01+bound)));
temp_ModlocalCinfo_size[size_localCinfo+temp_counter_01] = temp_remoteCinfo_size[ii];
temp_ModlocalCinfo_degree[size_localCinfo+temp_counter_01] = temp_remoteCinfo_degree[ii];
temp_ModlocalCinfo_oComm[temp_counter_01] = temp_Comm;
temp_counter_01++;
}
}
}
GraphElem size_ModlocalCinfo = size_localCinfo+temp_counter_01;
GraphElem size_ModlocalCinfo_oComm = temp_counter_01;
std::vector<GraphElem>().swap(temp_remoteCinfo_key);
CommunityVector().swap(temp_remoteCinfo_size);
GraphWeightVector().swap(temp_remoteCinfo_degree);
// remoteComm is broken into 2 arrays
std::vector<GraphElem> temp_remoteComm_v;
temp_remoteComm_v = extract_vertex_VertexCommMap(remoteComm);
std::vector<GraphElem> temp_remoteComm_comm;
temp_remoteComm_comm = extract_comm_VertexCommMap(remoteComm);
/// Create map for remoteComm tail mapped to currComm
ClusterLocalMap remoteComm_to_currComm_map_v;
ClusterLocalMap::const_iterator storedAlready_v;
ClusterLocalMap::const_iterator storedAlready_comm;
GraphElem temp_counter_02 = 0;
GraphElem temp_tail;
GraphElem temp_comm, temp_comm_mapped;
// First modify currComm
#pragma omp parallel default(none), \
shared(ModcurrComm, localCinfo_to_remoteCinfo_map), \
private(temp_comm, storedAlready_comm)
#pragma omp for schedule(guided)
for(int ii = 0; ii < ModcurrComm.size(); ii++) {
temp_comm = ModcurrComm[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
ModcurrComm[ii] = storedAlready_comm->second;
}
}
// Next modify currComm to include remoteComm
for(int ii=0; ii<temp_remoteComm_comm.size(); ii++) {
temp_comm = temp_remoteComm_comm[ii];
temp_tail = temp_remoteComm_v[ii];
if(temp_comm < base || temp_comm >= bound) {
storedAlready_comm = localCinfo_to_remoteCinfo_map.find(temp_comm);
temp_comm_mapped = storedAlready_comm->second;
temp_remoteComm_comm[ii] = temp_comm_mapped;
ModcurrComm.push_back(temp_comm_mapped);
} else {
ModcurrComm.push_back(temp_comm);
/// check line below
}
if(temp_tail < base || temp_tail >= bound) {
storedAlready_v = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready_v == remoteComm_to_currComm_map_v.end()) {
if(temp_tail < base || temp_tail >= bound) {
remoteComm_to_currComm_map_v.insert(std::make_pair(
temp_tail, (bound + temp_counter_02) ));
temp_remoteComm_v[ii] = bound + temp_counter_02;
}
temp_counter_02++;
}
}
}
std::vector<GraphElem>().swap(temp_remoteComm_v);
std::vector<GraphElem>().swap(temp_remoteComm_comm);
// comm_node_info remote_comm_info;
const Graph &g = dg.getLocalGraph();
GraphElem size_edgeListIndexes = g.edgeListIndexes.size();
GraphElem* temp_graph_edgeList_tail = gpu_graph.getPinned_edgeList_tail();
GraphWeight* temp_graph_edgeList_weight = gpu_graph.getPinned_edgeList_weight();
#pragma omp parallel default(none), shared(g, temp_graph_edgeList_tail, \
temp_graph_edgeList_weight, remoteComm_to_currComm_map_v), \
private(storedAlready)
#pragma omp for schedule(guided)
for(int ii=0; ii<g.edgeList.size(); ii++) {
ClusterLocalMap edgeList_tail_map;
GraphElem temp_tail = g.edgeList[ii].tail;
temp_graph_edgeList_tail[ii] = temp_tail;
temp_graph_edgeList_weight[ii] = g.edgeList[ii].weight;
if(temp_tail < base || temp_tail >= bound) {
/// use remoteComm_to_currComm_map_v map instead
storedAlready = remoteComm_to_currComm_map_v.find(temp_tail);
if(storedAlready != edgeList_tail_map.end()) {
temp_graph_edgeList_tail[ii] = storedAlready->second;
}
}
}
#ifdef PRINT_TIMEDS
double t_remap = timer();
double time_remap = t_remap - t0;
std::cout << "me[" << me << "]; Time GPU_remap: " << time_remap << std::endl;
#endif
/// Get pointers to memory of device arrays
GraphElem* dev_currComm = gpu_graph.get_currComm();
GraphElem* dev_ModlocalTarget = gpu_graph.get_ModlocalTarget();
GraphWeight* dev_vDegree = gpu_graph.get_vDegree();
GraphWeight* dev_clusterWeight = gpu_graph.get_clusterWeight();
GraphElem* dev_edgeListIndexes = gpu_graph.get_edgeListIndexes();
GraphElem* dev_ModcurrComm = gpu_graph.get_ModcurrComm();
GraphElem* dev_localCinfo_size = gpu_graph.get_ModlocalCinfo_size();
GraphWeight* dev_localCinfo_degree = gpu_graph.get_ModlocalCinfo_degree();
GraphElem* dev_localCinfo_oComm = gpu_graph.get_ModlocalCinfo_oComm();
GraphElem* dev_graph_edgeList_tail = gpu_graph.get_edgeList_tail();
GraphWeight* dev_graph_edgeList_weight = gpu_graph.get_edgeList_weight();
GraphElem* dev_unique_comm_array = gpu_graph.get_unique_comm_array();
GraphWeight* dev_unique_weight_array = gpu_graph.get_unique_weight_array();
gpu_graph.cpyVecTodev(currComm, dev_currComm);
gpu_graph.cpyVecTodev(vDegree, dev_vDegree);
gpu_graph.cpyVecTodev(clusterWeight, dev_clusterWeight);
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_edgeListIndexes["
<< g.edgeListIndexes.size() << "]" << std::endl;
#endif
gpu_graph.cpyVecTodev(g.edgeListIndexes, dev_edgeListIndexes);
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_ModlocalCinfo["
<< size_ModlocalCinfo << "]; size_ModlocalCinfo_oComm["
<< size_ModlocalCinfo_oComm << "]" << std::endl;
#endif
bool check_ModlocalCinfo_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo);
assert(check_ModlocalCinfo_memory);
bool check_ModlocalCinfoComm_memory = gpu_graph.checkModCommMemory(size_ModlocalCinfo_oComm);
assert(check_ModlocalCinfoComm_memory);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_size, dev_localCinfo_size, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_degree, dev_localCinfo_degree, size_ModlocalCinfo);
gpu_graph.cpyArrayTodev(temp_ModlocalCinfo_oComm, dev_localCinfo_oComm, size_ModlocalCinfo_oComm);
gpu_graph.cpyArrayTodev(temp_graph_edgeList_tail, dev_graph_edgeList_tail,
(GraphElem)g.edgeList.size());
gpu_graph.cpyArrayTodev(temp_graph_edgeList_weight, dev_graph_edgeList_weight,
(GraphElem)g.edgeList.size());
#ifdef DEBUG_CUVITE
std::cout << "nv[" << nv << "]; size_ModcurrComm["
<< ModcurrComm.size() << "]" << std::endl;
#endif
bool check_ModcurrComm_memory = gpu_graph.checkModCommMemory(
(GraphElem)ModcurrComm.size());
assert(check_ModcurrComm_memory);
gpu_graph.cpyVecTodev(ModcurrComm, dev_ModcurrComm);
GraphElem* dev_GraphEdge_low = gpu_graph.get_GraphEdge_low();
GraphElem* dev_GraphEdge_high = gpu_graph.get_GraphEdge_high();
/// allocate device memory for filling in comm and weights
GraphElem* dev_clmap_comm = gpu_graph.get_clmap_comm();
GraphWeight* dev_clmap_weight = gpu_graph.get_clmap_weight();
GraphElem clmapSize;
GraphElem* dev_clmap_loc = gpu_graph.get_clmap_loc();
GraphElem* dev_List_numEdges = gpu_graph.get_List_numEdges();
GraphElem* dev_list_lt_ts = gpu_graph.get_dev_list_lt_ts();
GraphElem* dev_list_lt_cs1 = gpu_graph.get_dev_list_lt_cs1();
GraphElem* dev_list_lt_cs2 = gpu_graph.get_dev_list_lt_cs2();
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_dtrans = timer();
double time_dtrans = t_dtrans - t0;
std::cout << "me[" << me << "]; Time GPU_dtrans: " << time_dtrans << std::endl;
#endif
if(numIters == 1)
{
CUDA_SAFE(cudaMemset(dev_GraphEdge_low, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_GraphEdge_high, 0, mem_size_GraphElem_nv));
dim3 numBlocks01( (nv-1) / L_THREADBLOCK_SIZE + 1);
dim3 Block_dim01(L_THREADBLOCK_SIZE);
gpu_distExecuteLouvainIteration<<<numBlocks01,Block_dim01>>>(
nv,
dev_edgeListIndexes,
dev_GraphEdge_low, dev_GraphEdge_high,
me, base, bound);
CUDA_SAFE(cudaMemset(dev_List_numEdges, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_clmap_loc, 0, mem_size_GraphElem_nv));
GraphElem* dev_NumClusters = gpu_graph.get_NumClusters();
CUDA_SAFE(cudaMemset(dev_NumClusters, 0, sizeof(GraphElem)));
GraphElem* dev_size_clmap = gpu_graph.get_size_clmap();
CUDA_SAFE(cudaMemset(dev_size_clmap, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_ts = gpu_graph.get_dev_size_lt_ts();
CUDA_SAFE(cudaMemset(dev_size_lt_ts, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs1 = gpu_graph.get_dev_size_lt_cs1();
CUDA_SAFE(cudaMemset(dev_size_lt_cs1, 0, sizeof(GraphElem)));
GraphElem* dev_size_lt_cs2 = gpu_graph.get_dev_size_lt_cs2();
CUDA_SAFE(cudaMemset(dev_size_lt_cs2, 0, sizeof(GraphElem)));
CUDA_SAFE(cudaMemset(dev_list_lt_ts, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_list_lt_cs1, 0, mem_size_GraphElem_nv));
CUDA_SAFE(cudaMemset(dev_list_lt_cs2, 0, mem_size_GraphElem_nv));
dim3 numBlocks02( (nv-1) / MS_THREADBLOCK_SIZE + 1);
dim3 Block_dim02(MS_THREADBLOCK_SIZE);
count_size_clmap<PHY_WRP_SZ><<<numBlocks02,Block_dim02>>>(nv, dev_NumClusters,
dev_clmap_loc, dev_size_clmap,
dev_size_lt_ts, dev_list_lt_ts,
dev_size_lt_cs1, dev_list_lt_cs1,
dev_size_lt_cs2, dev_list_lt_cs2,
dev_GraphEdge_low, dev_GraphEdge_high, dev_List_numEdges);
/// copy to host number of clusters and size of cluster map memory
#ifdef DEBUG_CUVITE
GraphElem NumClusters = 0;
CUDA_SAFE(cudaMemcpy(&NumClusters, dev_NumClusters,
sizeof(GraphElem), cudaMemcpyDeviceToHost));
#endif
CUDA_SAFE(cudaMemcpy(&clmapSize, dev_size_clmap,
sizeof(GraphElem), cudaMemcpyDeviceToHost));
gpu_graph.set_clmapSize(clmapSize);
dev_clmap_comm = gpu_graph.getDevMem_clmapComm(clmapSize);
dev_clmap_weight = gpu_graph.getDevMem_clmapWeight(clmapSize);
gpu_graph.set_size_lt_ts();
gpu_graph.set_size_lt_cs1();
gpu_graph.set_size_lt_cs2();
}
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_iter1 = timer();
double time_iter1 = t_iter1 - t0;
std::cout << "me[" << me << "]; Time GPU_iter1: " << time_iter1 << std::endl;
#endif
GraphElem size_lt_ts = gpu_graph.get_size_lt_ts();
GraphElem size_lt_cs1 = gpu_graph.get_size_lt_cs1();
GraphElem size_lt_cs2 = gpu_graph.get_size_lt_cs2();
clmapSize = gpu_graph.get_clmapSize();
CUDA_SAFE(cudaMemset(dev_clmap_comm, 0,
clmapSize * sizeof(GraphElem)));
CUDA_SAFE(cudaMemset(dev_clmap_weight, 0,
clmapSize * sizeof(GraphWeight)));
GraphWeight* dev_selfLoopVec = gpu_graph.get_selfLoopVec();
CUDA_SAFE(cudaMemset(dev_selfLoopVec, 0, mem_size_GraphWeight_nv));
dim3 numBlocks03( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim03(S_THREADBLOCK_SIZE);
distBuildLocalMapCounter<PHY_WRP_SZ><<<numBlocks03,Block_dim03>>>(
nv, dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_List_numEdges, dev_selfLoopVec,
base); // , bound);
GraphElem* dev_uniq_clus_vec = gpu_graph.get_uniq_clus_vec();
CUDA_SAFE(cudaMemset(dev_uniq_clus_vec, 0, mem_size_GraphElem_nv));
GraphWeight* dev_counter = gpu_graph.get_counter();
CUDA_SAFE(cudaMemset(dev_counter, 0, mem_size_GraphWeight_nv));
const int num_streams = 2;
cudaStream_t streams[num_streams];
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
CUDA_SAFE(cudaStreamCreate(&streams[i_streams]) );
}
dim3 numBlocks05( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim05(S_THREADBLOCK_SIZE);
distGetMaxIndex<PHY_WRP_SZ><<<numBlocks05,Block_dim05, 0>>>(
nv,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kernel11 = timer();
double time_kernel11 = t_kernel11 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel11: " << time_kernel11 << std::endl;
#endif
#if 0
distGetMaxIndex_large<PHY_WRP_SZ><<<numBlocks05,Block_dim05, 0, streams[1]>>>(
nv,
dev_GraphEdge_low, dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base, bound);
#else
if(size_lt_cs2 > 0) {
dim3 numBlocks052(FINDING_UNIQCOMM_NUM_BLOCKS);
dim3 Block_dim052(FINDING_UNIQCOMM_BLOCK_SIZE);
GraphElem nv_chunk_size;
nv_chunk_size = (size_lt_cs2 - 1) / FINDING_UNIQCOMM_NUM_BLOCKS + 1;
assert(ModcurrComm.size() <= FINDING_UNIQCOMM_ARRAY_SIZE);
distGetMaxIndex_large_new<PHY_WRP_SZ><<<numBlocks052,Block_dim052, 0, streams[0]>>>(
me, numIters,
nv, nv_chunk_size,
size_lt_cs2, dev_list_lt_cs2,
ModcurrComm.size(), // size_ModlocalCinfo,
dev_unique_comm_array,
dev_unique_weight_array,
dev_GraphEdge_low, // dev_GraphEdge_high,
dev_graph_edgeList_tail,
dev_graph_edgeList_weight,
dev_ModcurrComm,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_List_numEdges,
dev_uniq_clus_vec, dev_counter,
base); // , bound);
}
#endif
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kernel1 = timer();
double time_kernel1 = t_kernel1 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel1: " << time_kernel1 << std::endl;
#endif
CUDA_SAFE(cudaMemcpy(dev_ModlocalTarget, dev_ModcurrComm,
sizeof(GraphElem)*ModcurrComm.size(), cudaMemcpyDeviceToDevice));
dim3 numBlocks06( (nv-1) / S_THREADBLOCK_SIZE + 1);
dim3 Block_dim06(S_THREADBLOCK_SIZE);
computeMaxIndex<PHY_WRP_SZ><<<numBlocks06,Block_dim06, 0>>>(
nv,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kernel21 = timer();
double time_kernel21 = t_kernel21 - t0;
std::cout << "me[" << me << "]; Time GPU_kernel21: " << time_kernel21 << std::endl;
#endif
computeMaxIndex_large<PHY_WRP_SZ><<<numBlocks06,Block_dim06, 0, streams[0]>>>(
nv,
dev_currComm,
dev_ModcurrComm,
dev_localCinfo_size,
dev_localCinfo_degree,
dev_localCinfo_oComm,
dev_selfLoopVec,
dev_uniq_clus_vec, dev_counter,
dev_clmap_loc,
dev_clmap_comm, dev_clmap_weight,
dev_vDegree,
dev_ModlocalTarget,
dev_clusterWeight,
constantForSecondTerm,
base, bound);
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kernels = timer();
double time_kernels = t_kernels - t0;
std::cout << "me[" << me << "]; Time GPU_kernels: " << time_kernels << std::endl;
#endif
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
cudaStreamSynchronize(streams[i_streams]);
}
for(auto i_streams = 0; i_streams < num_streams; i_streams++) {
cudaStreamDestroy(streams[i_streams]);
}
/// Copy Targets to Host
CUDA_SAFE(cudaMemcpy(&ModcurrComm[0], dev_ModlocalTarget,
(ModcurrComm.size()*sizeof(GraphElem)), cudaMemcpyDeviceToHost));
/// Copy clusterWeight to Host
CUDA_SAFE(cudaMemcpy(&clusterWeight[0],
dev_clusterWeight,
(clusterWeight.size()*sizeof(GraphWeight)), cudaMemcpyDeviceToHost));
#ifdef PRINT_TIMEDS
cudaDeviceSynchronize();
double t_kd2h = timer();
double time_kd2h = t_kd2h - t0;
std::cout << "me[" << me << "]; Time GPU_kd2h: " << time_kd2h << std::endl;
#endif
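/// Fold the device-computed targets (now in ModcurrComm) back into the host-side community structures.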
updateLocalTarget_gpu (
nv,
currComm,
targetComm,
vDegree,
remoteCupdate,
ModcurrComm,
localCupdate,
base, bound, numIters);
#ifdef PRINT_TIMEDS
double t_locupd = timer();
double time_locupd = t_locupd - t0;
std::cout << "me[" << me << "]; Time GPU_locupd: " << time_locupd << std::endl;
#endif
std::vector<GraphElem>().swap(ModcurrComm);
#ifdef PRINT_TIMEDS
double t_all = timer();
double time_all = t_all - t0;
std::cout << "me[" << me << "]; Time GPU_all: " << time_all << std::endl;
#endif
#endif // end of option to run hybrid or GPU-only code
return 1;
}
|
687a80563d1dc8fb7581816e02b045d0c307464c.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "UniformGridSortBuilder.h"
#include "Algebra.h"
#include "UniformGrid.h"
#include "Primitive.h"
#include "BBox.h"
#include <thrust/device_allocator.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include "DebugUtils.h"
#include "Timer.h"
class FragmentCounter
{
public:
int gridRes[3];
const float3 minBound;
const float3 cellSize;
const float3 cellSizeRCP;
thrust::device_ptr<float3> vertexArray;
FragmentCounter(
int aGridResX, int aGridResY, int aGridResZ,
float3 aMinBound,
float3 aCellSize,
float3 aCellSizeRCP,
thrust::device_ptr<float3> aVtxArray):
minBound(aMinBound),
cellSize(aCellSize),
cellSizeRCP(aCellSizeRCP),
vertexArray(aVtxArray)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
}
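// Conservative per-triangle count: number of grid cells overlapped by the triangle's slightly inflated AABB; the exact triangle/box test is applied later in FragmentWriter.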
__host__ __device__ int operator()(const uint3& aTriVtxIds)
{
Triangle prim;
prim.vtx[0] = vertexArray[aTriVtxIds.x];
prim.vtx[1] = vertexArray[aTriVtxIds.y];
prim.vtx[2] = vertexArray[aTriVtxIds.z];
BBox bounds = BBoxExtractor<Triangle>::get(prim);
float3 minCellIdf = (bounds.vtx[0] - minBound - cellSize * 0.001f) * cellSizeRCP;
const float3 maxCellIdPlus1f = (bounds.vtx[1] - minBound + cellSize * 0.001f) * cellSizeRCP + rep(1.f);
const int minCellIdX = max(0, (int)(minCellIdf.x));
const int minCellIdY = max(0, (int)(minCellIdf.y));
const int minCellIdZ = max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = min(gridRes[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = min(gridRes[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = min(gridRes[2], (int)(maxCellIdPlus1f.z));
const int numCells =
(maxCellIdP1X - minCellIdX)
* (maxCellIdP1Y - minCellIdY)
* (maxCellIdP1Z - minCellIdZ);
return numCells;
}
};
//////////////////////////////////////////////////////////////////////////
//axis tests
//////////////////////////////////////////////////////////////////////////
__host__ __device__ bool axisTest(
const float a,
const float b,
const float fa,
const float fb,
const float v0a,
const float v0b,
const float v1a,
const float v1b,
const float aCellSizeHALFa,
const float aCellSizeHALFb)
{
const float p0 = a * v0a + b * v0b;
const float p1 = a * v1a + b * v1b;
const float minP = fminf(p0, p1);
const float maxP = fmaxf(p0, p1);
const float rad = fa * aCellSizeHALFa + fb * aCellSizeHALFb;
return !(minP > rad + EPS || maxP + EPS < -rad);
}
#define AXISTEST_X01(e, fe, v0, v1, v2, s) \
axisTest(e.z, -e.y, fe.z, fe.y, v0.y, v0.z, v2.y, v2.z, s.y, s.z)
#define AXISTEST_X2(e, fe, v0, v1, v2, s) \
axisTest(e.z, -e.y, fe.z, fe.y, v0.y, v0.z, v1.y, v1.z, s.y, s.z)
#define AXISTEST_Y02(e, fe, v0, v1, v2, s) \
axisTest(-e.z, e.x, fe.z, fe.x, v0.x, v0.z, v2.x, v2.z, s.x, s.z)
#define AXISTEST_Y1(e, fe, v0, v1, v2, s) \
axisTest(-e.z, e.x, fe.z, fe.x, v0.x, v0.z, v1.x, v1.z, s.x, s.z)
#define AXISTEST_Z12(e, fe, v0, v1, v2, s) \
axisTest(e.y, -e.x, fe.y, fe.x, v1.x, v1.y, v2.x, v2.y, s.x, s.y)
#define AXISTEST_Z0(e, fe, v0, v1, v2, s) \
axisTest(e.y, -e.x, fe.y, fe.x, v0.x, v0.y, v1.x, v1.y, s.x, s.y)
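// The macros above implement the 9 cross-product axis tests of the separating-axis triangle-box overlap test (in the spirit of Akenine-Moller); together with the plane/box test and the cell-range clamp in FragmentWriter they give an exact triangle/cell overlap check.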
//////////////////////////////////////////////////////////////////////////
class FragmentWriter
{
public:
int gridRes[3];
const float3 minBound;
const float3 cellSize;
const float3 cellSizeRCP;
thrust::device_ptr<float3> vertexArray;
thrust::device_ptr<uint> outKeys;
thrust::device_ptr<uint> outValues;
FragmentWriter(
int aGridResX, int aGridResY, int aGridResZ,
float3 aMinBound,
float3 aCellSize,
float3 aCellSizeRCP,
thrust::device_ptr<float3> aVtxArray,
thrust::device_ptr<uint> aKeys,
thrust::device_ptr<uint> aVals
) :
minBound(aMinBound),
cellSize(aCellSize),
cellSizeRCP(aCellSizeRCP),
vertexArray(aVtxArray),
outKeys(aKeys),
outValues(aVals)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
const uint3 aTriVtxIds = thrust::get<0>(t);
const unsigned int startPosition = thrust::get<1>(t);
const size_t triangleId = thrust::get<2>(t);
Triangle triangle;
triangle.vtx[0] = vertexArray[aTriVtxIds.x];
triangle.vtx[1] = vertexArray[aTriVtxIds.y];
triangle.vtx[2] = vertexArray[aTriVtxIds.z];
BBox bounds = BBoxExtractor<Triangle>::get(triangle);
float3 minCellIdf = (bounds.vtx[0] - minBound - cellSize * 0.001f) * cellSizeRCP;
const float3 maxCellIdPlus1f = (bounds.vtx[1] - minBound + cellSize * 0.001f) * cellSizeRCP + rep(1.f);
const int minCellIdX = max(0, (int)(minCellIdf.x));
const int minCellIdY = max(0, (int)(minCellIdf.y));
const int minCellIdZ = max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = min(gridRes[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = min(gridRes[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = min(gridRes[2], (int)(maxCellIdPlus1f.z));
unsigned int nextSlot = startPosition;
const float3 normal =
~((triangle.vtx[1] - triangle.vtx[0]) %
(triangle.vtx[2] - triangle.vtx[0]));
const float3 gridCellSizeHALF = cellSize * 0.505f; //1% extra as epsilon
float3 minCellCenter;
minCellCenter.x = (float)(minCellIdX);
minCellCenter.y = (float)(minCellIdY);
minCellCenter.z = (float)(minCellIdZ);
minCellCenter = minCellCenter * cellSize;
minCellCenter = minCellCenter + minBound + gridCellSizeHALF;
float3 cellCenter;
cellCenter.z = minCellCenter.z - cellSize.z;
for (int z = minCellIdZ; z < maxCellIdP1Z; ++z)
{
cellCenter.z += cellSize.z;
cellCenter.y = minCellCenter.y - cellSize.y;
for (int y = minCellIdY; y < maxCellIdP1Y; ++y)
{
cellCenter.y += cellSize.y;
cellCenter.x = minCellCenter.x - cellSize.x;
for (int x = minCellIdX; x < maxCellIdP1X; ++x, ++nextSlot)
{
cellCenter.x += cellSize.x;
//////////////////////////////////////////////////////////////////////////
//coordinate transform origin -> cellCenter
const float3 v0 = triangle.vtx[0] - cellCenter;
const float3 v1 = triangle.vtx[1] - cellCenter;
const float3 v2 = triangle.vtx[2] - cellCenter;
const float3 e0 = v1 - v0;
const float3 e1 = v2 - v1;
const float3 e2 = v0 - v2;
bool passedAllTests = true;
//////////////////////////////////////////////////////////////////////////
//Plane/box overlap test
float3 vmin, vmax;
vmin.x = (normal.x > 0.f) ? -gridCellSizeHALF.x : gridCellSizeHALF.x;
vmin.y = (normal.y > 0.f) ? -gridCellSizeHALF.y : gridCellSizeHALF.y;
vmin.z = (normal.z > 0.f) ? -gridCellSizeHALF.z : gridCellSizeHALF.z;
vmax = -vmin;
vmax = vmax - v0;
vmin = vmin - v0;
passedAllTests = passedAllTests && dot(normal, vmin) <= 0.f && dot(normal, vmax) > 0.f;
//Note: early exit here makes the code slower (CUDA 7.5, GTX 970)
//////////////////////////////////////////////////////////////////////////
//9 tests for separating axis
float3 fe;
fe.x = fabsf(e0.x);
fe.y = fabsf(e0.y);
fe.z = fabsf(e0.z);
passedAllTests = passedAllTests && AXISTEST_X01(e0, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y02(e0, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z12(e0, fe, v0, v1, v2, gridCellSizeHALF);
fe.x = fabsf(e1.x);
fe.y = fabsf(e1.y);
fe.z = fabsf(e1.z);
passedAllTests = passedAllTests && AXISTEST_X01(e1, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y02(e1, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z0(e1, fe, v0, v1, v2, gridCellSizeHALF);
fe.x = fabsf(e2.x);
fe.y = fabsf(e2.y);
fe.z = fabsf(e2.z);
passedAllTests = passedAllTests && AXISTEST_X2(e2, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y1(e2, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z12(e2, fe, v0, v1, v2, gridCellSizeHALF);
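// A fragment that fails any test still occupies its pre-allocated slot: it gets an out-of-range cell key so it sorts behind all valid cells and is skipped by CellExtractor.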
if (!passedAllTests)
{
outKeys[nextSlot] = (uint)(gridRes[0] * gridRes[1] * gridRes[2]);
outValues[nextSlot] = (uint)triangleId;
continue;
}
outKeys[nextSlot] = x + y * gridRes[0] + z * (gridRes[0] * gridRes[1]);
outValues[nextSlot] = (uint)triangleId;
}//end for z
}//end for y
}//end for x
}
};
class CellExtractor
{
public:
int gridRes[3];
uint* cells_ptr;
CellExtractor(
int aGridResX, int aGridResY, int aGridResZ,
uint2* aCellsPtr)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
cells_ptr = (uint*)aCellsPtr;
}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
const unsigned int myCellIndex = thrust::get<0>(t);
const unsigned int nextCellIndex = thrust::get<1>(t);
const size_t myId = thrust::get<2>(t);
if (myCellIndex >= (unsigned int)gridRes[0] * gridRes[1] * gridRes[2])
return;
if (myCellIndex != nextCellIndex)
{
//end of range for the cell at myCellIndex
cells_ptr[2u * myCellIndex + 1u] = (unsigned int)myId + 1u;
//start of range for the cell at nextCellIndex
if (nextCellIndex < (unsigned int)gridRes[0] * gridRes[1] * gridRes[2])
cells_ptr[2u * nextCellIndex] = (unsigned int)myId + 1u;
}
}
};
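// build() pipeline: (1) count cell overlaps per triangle, (2) exclusive-scan the counts into output offsets, (3) write (cell key, triangle id) fragments with the exact overlap test, (4) sort fragments by cell key, (5) extract per-cell [start, end) ranges into oGrid.cells.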
__host__ UniformGrid UniformGridSortBuilder::build(WFObject & aGeometry, const int aResX, const int aResY, const int aResZ)
{
cudastd::timer timer;
UniformGrid oGrid;
//initialize grid resolution
oGrid.res[0] = thrust::max<int>(aResX, 1);
oGrid.res[1] = thrust::max<int>(aResY, 1);
oGrid.res[2] = thrust::max<int>(aResZ, 1);
//allocate grid cells
oGrid.cells = thrust::device_new<uint2>(oGrid.res[0] * oGrid.res[1] * oGrid.res[2]);
//initialize empty cells
thrust::device_ptr<uint2> dev_ptr_uint2 = oGrid.cells;
uint2 * raw_ptr_uint2 = thrust::raw_pointer_cast(dev_ptr_uint2);
uint * raw_ptr_uint = (uint*)raw_ptr_uint2;
thrust::device_ptr<uint> dev_ptr_uint(raw_ptr_uint);
thrust::fill(dev_ptr_uint, dev_ptr_uint + 2 * oGrid.res[0] * oGrid.res[1] * oGrid.res[2], 0u);
//compute vertex index buffer for the triangles
thrust::host_vector<uint3> host_indices(aGeometry.faces.size());
for (size_t i = 0; i < aGeometry.faces.size(); i++)
{
host_indices[i].x = (unsigned int)aGeometry.faces[i].vert1;
host_indices[i].y = (unsigned int)aGeometry.faces[i].vert2;
host_indices[i].z = (unsigned int)aGeometry.faces[i].vert3;
}
//copy the vertex index buffer to the device
thrust::device_vector<uint3> device_indices(aGeometry.faces.size());
thrust::copy(host_indices.begin(), host_indices.end(), device_indices.begin());
//copy the vertex buffer to the device
thrust::device_vector<float3> device_vertices(aGeometry.vertices.begin(), aGeometry.vertices.end());
//compute scene bounding box
oGrid.vtx[0] = thrust::reduce(device_vertices.begin(), device_vertices.end(), make_float3( FLT_MAX, FLT_MAX, FLT_MAX), binary_float3_min());
oGrid.vtx[1] = thrust::reduce(device_vertices.begin(), device_vertices.end(), make_float3(-FLT_MAX, -FLT_MAX,- FLT_MAX), binary_float3_max());
const float boundsDiagonal = len(oGrid.vtx[1] - oGrid.vtx[0]);
const float myEpsilon = boundsDiagonal * 0.0005773f;//0.57735026918962576450914878050196 = 1.0 / sqrt(3.0)
//extend scene bounding box with epsilon in each dimension
oGrid.vtx[0] -= make_float3(myEpsilon, myEpsilon, myEpsilon);
oGrid.vtx[1] += make_float3(myEpsilon, myEpsilon, myEpsilon);
//count triangle-cell intersections
thrust::device_vector<unsigned int> fragment_counts(device_indices.size() + 1, 0u);
FragmentCounter frag_count(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
oGrid.vtx[0],
oGrid.getCellSize(),
oGrid.getCellSizeRCP(),
device_vertices.data()
);
thrust::transform(device_indices.begin(), device_indices.end(), fragment_counts.begin(), frag_count);
thrust::exclusive_scan(fragment_counts.begin(), fragment_counts.end(), fragment_counts.begin());
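// After the exclusive scan, fragment_counts[i] is the output offset of triangle i's first fragment and the final element holds the total fragment count.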
//#ifdef _DEBUG
// outputDeviceVector("Scanned counts: ", fragment_counts);
//#endif
size_t num_fragments = fragment_counts[device_indices.size()];
//allocate cell index and triangle index buffers
thrust::device_vector<uint> fragment_keys(num_fragments);
oGrid.primitives = thrust::device_new<uint> (num_fragments);//fragment_vals
oGrid.numRefs = (unsigned)num_fragments;
//write triangle-cell pairs
FragmentWriter frag_write(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
oGrid.vtx[0],
oGrid.getCellSize(),
oGrid.getCellSizeRCP(),
device_vertices.data(),
fragment_keys.data(),
oGrid.primitives
);
thrust::counting_iterator<size_t> first(0u);
thrust::counting_iterator<size_t> last(device_indices.size());
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(device_indices.begin(), fragment_counts.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(device_indices.end(), fragment_counts.end() - 1u, last)),
frag_write);
//sort the pairs
thrust::sort_by_key(fragment_keys.begin(), fragment_keys.end(), oGrid.primitives);
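// Sorting by cell key groups all fragments of a cell contiguously; rejected fragments (key == resX*resY*resZ) end up past every valid cell and are never turned into ranges.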
//#ifdef _DEBUG
// outputDeviceVector("sorted keys: ", fragment_keys);
// outputDeviceVector("sorted vals: ", oGrid.primitives);
//#endif
//initialize the grid cells
CellExtractor extract_ranges(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
thrust::raw_pointer_cast(oGrid.cells)
);
thrust::counting_iterator<size_t> first_pair(0u);
thrust::counting_iterator<size_t> last_pair(num_fragments - 1);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(fragment_keys.begin(), fragment_keys.begin() + 1, first_pair)),
thrust::make_zip_iterator(thrust::make_tuple(fragment_keys.end() - 1, fragment_keys.end(), last_pair)),
extract_ranges);
//#ifdef _DEBUG
// thrust::device_vector<unsigned int> cells_x(oGrid.cells.size());
// thrust::device_vector<unsigned int> cells_y(oGrid.cells.size());
// thrust::transform(oGrid.cells.begin(), oGrid.cells.end(), cells_x.begin(), uint2_get_x());
// thrust::transform(oGrid.cells.begin(), oGrid.cells.end(), cells_y.begin(), uint2_get_y());
// outputDeviceVector("Grid cells x: ", cells_x);
// outputDeviceVector("Grid cells y: ", cells_y);
//#endif
totalTime = timer.get();
timer.cleanup();
resX = aResX;
resY = aResY;
resZ = aResZ;
return oGrid;
}
__host__ int UniformGridSortBuilder::test(UniformGrid& aGrid, WFObject & aGeometry)
{
const size_t numCells = (size_t)(aGrid.res[0] * aGrid.res[1] * aGrid.res[2]);
thrust::host_vector<uint2> host_cells(numCells);
thrust::copy(aGrid.cells, aGrid.cells + numCells, host_cells.begin());
thrust::host_vector<unsigned int> host_primitives(aGrid.numRefs);
thrust::copy(aGrid.primitives, aGrid.primitives + aGrid.numRefs, host_primitives.begin());
for (int z = 0; z < aGrid.res[2]; ++z)
{
for (int y = 0; y < aGrid.res[1]; ++y)
{
for (int x = 0; x < aGrid.res[0]; ++x)
{
uint2 cell = host_cells[x + y * aGrid.res[0] + z * aGrid.res[0] * aGrid.res[1]];
if (cell.x > cell.y)
{
std::cerr << "Grid cell (" << x << ", " << y << ", " << z << ") has invalid range (" << cell.x << ", " << cell.y << ")\n";
return 1;
}
for (size_t refId = cell.x; refId < cell.y; ++refId)
{
if (refId >= host_primitives.size())
{
std::cerr << "Ivalid primitive reference " << refId << " in cell (" << x << ", " << y << ", " << z << ")\n";
return 2;
}
unsigned int triId = host_primitives[refId];
if (triId >= aGeometry.faces.size())
{
std::cerr << "Ivalid primitive index " << triId << " in cell (" << x << ", " << y << ", " << z << ")\n";
return 3;
}
Triangle prim;
prim.vtx[0] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert1];
prim.vtx[1] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert2];
prim.vtx[2] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert3];
BBox bounds = BBoxExtractor<Triangle>::get(prim);
float3 minCellIdf = (bounds.vtx[0] - aGrid.vtx[0] - aGrid.getCellSize() * 0.001f) * aGrid.getCellSizeRCP();
const float3 maxCellIdPlus1f = (bounds.vtx[1] - aGrid.vtx[0] + aGrid.getCellSize() * 0.001f) * aGrid.getCellSizeRCP() + rep(1.f);
const int minCellIdX = ::max(0, (int)(minCellIdf.x));
const int minCellIdY = ::max(0, (int)(minCellIdf.y));
const int minCellIdZ = ::max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = ::min(aGrid.res[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = ::min(aGrid.res[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = ::min(aGrid.res[2], (int)(maxCellIdPlus1f.z));
if (x < minCellIdX || maxCellIdP1X <= x ||
y < minCellIdY || maxCellIdP1Y <= y ||
z < minCellIdZ || maxCellIdP1Z <= z )
{
std::cerr << "Primitive " << triId << " inserted in wrong cell (" << x << ", " << y << ", " << z << ")\n";
return 4;
}
}
}
}
}
for (size_t primId = 0; primId < aGeometry.faces.size(); ++primId)
{
bool inserted = false;
for (size_t refId = 0; refId < host_primitives.size(); ++refId)
{
if (host_primitives[refId] == primId)
{
inserted = true;
break;
}
}
if (!inserted)
{
std::cerr << "Primitive " << primId << " not inserted in the grid!\n";
return 5;
}
}
return 0;
}
__host__ void UniformGridSortBuilder::stats()
{
std::cerr << "[" << resX << " x " << resY << " x " << resZ << "] grid build in " << totalTime << "ms\n";
}
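// Usage sketch (not part of the original file; assumes a WFObject loaded elsewhere):
// UniformGridSortBuilder builder;
// UniformGrid grid = builder.build(obj, 32, 32, 32);
// if (builder.test(grid, obj) == 0) builder.stats();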
| 687a80563d1dc8fb7581816e02b045d0c307464c.cu | #include "pch.h"
#include "UniformGridSortBuilder.h"
#include "Algebra.h"
#include "UniformGrid.h"
#include "Primitive.h"
#include "BBox.h"
#include <thrust/device_allocator.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include "DebugUtils.h"
#include "Timer.h"
class FragmentCounter
{
public:
int gridRes[3];
const float3 minBound;
const float3 cellSize;
const float3 cellSizeRCP;
thrust::device_ptr<float3> vertexArray;
FragmentCounter(
int aGridResX, int aGridResY, int aGridResZ,
float3 aMinBound,
float3 aCellSize,
float3 aCellSizeRCP,
thrust::device_ptr<float3> aVtxArray):
minBound(aMinBound),
cellSize(aCellSize),
cellSizeRCP(aCellSizeRCP),
vertexArray(aVtxArray)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
}
__host__ __device__ int operator()(const uint3& aTriVtxIds)
{
Triangle prim;
prim.vtx[0] = vertexArray[aTriVtxIds.x];
prim.vtx[1] = vertexArray[aTriVtxIds.y];
prim.vtx[2] = vertexArray[aTriVtxIds.z];
BBox bounds = BBoxExtractor<Triangle>::get(prim);
float3 minCellIdf = (bounds.vtx[0] - minBound - cellSize * 0.001f) * cellSizeRCP;
const float3 maxCellIdPlus1f = (bounds.vtx[1] - minBound + cellSize * 0.001f) * cellSizeRCP + rep(1.f);
const int minCellIdX = max(0, (int)(minCellIdf.x));
const int minCellIdY = max(0, (int)(minCellIdf.y));
const int minCellIdZ = max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = min(gridRes[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = min(gridRes[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = min(gridRes[2], (int)(maxCellIdPlus1f.z));
const int numCells =
(maxCellIdP1X - minCellIdX)
* (maxCellIdP1Y - minCellIdY)
* (maxCellIdP1Z - minCellIdZ);
return numCells;
}
};
//////////////////////////////////////////////////////////////////////////
//axis tests
//////////////////////////////////////////////////////////////////////////
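// Separating-axis helpers for the exact triangle/box overlap test (in the style of the
// Akenine-Moller tribox test): the nine macros below test the cross products of the triangle
// edges with the box axes. The box face-normal tests are implicit here, because only cells
// inside the triangle's bounding box are ever visited, and the triangle-plane test is
// performed inline in FragmentWriter before these macros are evaluated.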
__host__ __device__ bool axisTest(
const float a,
const float b,
const float fa,
const float fb,
const float v0a,
const float v0b,
const float v1a,
const float v1b,
const float aCellSizeHALFa,
const float aCellSizeHALFb)
{
const float p0 = a * v0a + b * v0b;
const float p1 = a * v1a + b * v1b;
const float minP = fminf(p0, p1);
const float maxP = fmaxf(p0, p1);
const float rad = fa * aCellSizeHALFa + fb * aCellSizeHALFb;
return !(minP > rad + EPS || maxP + EPS < -rad);
}
#define AXISTEST_X01(e, fe, v0, v1, v2, s) \
axisTest(e.z, -e.y, fe.z, fe.y, v0.y, v0.z, v2.y, v2.z, s.y, s.z)
#define AXISTEST_X2(e, fe, v0, v1, v2, s) \
axisTest(e.z, -e.y, fe.z, fe.y, v0.y, v0.z, v1.y, v1.z, s.y, s.z)
#define AXISTEST_Y02(e, fe, v0, v1, v2, s) \
axisTest(-e.z, e.x, fe.z, fe.x, v0.x, v0.z, v2.x, v2.z, s.x, s.z)
#define AXISTEST_Y1(e, fe, v0, v1, v2, s) \
axisTest(-e.z, e.x, fe.z, fe.x, v0.x, v0.z, v1.x, v1.z, s.x, s.z)
#define AXISTEST_Z12(e, fe, v0, v1, v2, s) \
axisTest(e.y, -e.x, fe.y, fe.x, v1.x, v1.y, v2.x, v2.y, s.x, s.y)
#define AXISTEST_Z0(e, fe, v0, v1, v2, s) \
axisTest(e.y, -e.x, fe.y, fe.x, v0.x, v0.y, v1.x, v1.y, s.x, s.y)
//////////////////////////////////////////////////////////////////////////
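// Write pass: every triangle emits one (cell key, triangle id) pair per overlapped cell,
// starting at the offset produced by the exclusive scan of the fragment counts. Pairs whose
// exact triangle/box test fails still occupy their slot but receive the out-of-range key
// res[0] * res[1] * res[2], so after sorting they collect at the end of the fragment list.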
class FragmentWriter
{
public:
int gridRes[3];
const float3 minBound;
const float3 cellSize;
const float3 cellSizeRCP;
thrust::device_ptr<float3> vertexArray;
thrust::device_ptr<uint> outKeys;
thrust::device_ptr<uint> outValues;
FragmentWriter(
int aGridResX, int aGridResY, int aGridResZ,
float3 aMinBound,
float3 aCellSize,
float3 aCellSizeRCP,
thrust::device_ptr<float3> aVtxArray,
thrust::device_ptr<uint> aKeys,
thrust::device_ptr<uint> aVals
) :
minBound(aMinBound),
cellSize(aCellSize),
cellSizeRCP(aCellSizeRCP),
vertexArray(aVtxArray),
outKeys(aKeys),
outValues(aVals)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
const uint3 aTriVtxIds = thrust::get<0>(t);
const unsigned int startPosition = thrust::get<1>(t);
const size_t triangleId = thrust::get<2>(t);
Triangle triangle;
triangle.vtx[0] = vertexArray[aTriVtxIds.x];
triangle.vtx[1] = vertexArray[aTriVtxIds.y];
triangle.vtx[2] = vertexArray[aTriVtxIds.z];
BBox bounds = BBoxExtractor<Triangle>::get(triangle);
float3 minCellIdf = (bounds.vtx[0] - minBound - cellSize * 0.001f) * cellSizeRCP;
const float3 maxCellIdPlus1f = (bounds.vtx[1] - minBound + cellSize * 0.001f) * cellSizeRCP + rep(1.f);
const int minCellIdX = max(0, (int)(minCellIdf.x));
const int minCellIdY = max(0, (int)(minCellIdf.y));
const int minCellIdZ = max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = min(gridRes[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = min(gridRes[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = min(gridRes[2], (int)(maxCellIdPlus1f.z));
unsigned int nextSlot = startPosition;
const float3 normal =
~((triangle.vtx[1] - triangle.vtx[0]) %
(triangle.vtx[2] - triangle.vtx[0]));
const float3 gridCellSizeHALF = cellSize * 0.505f; //1% extra as epsilon
float3 minCellCenter;
minCellCenter.x = (float)(minCellIdX);
minCellCenter.y = (float)(minCellIdY);
minCellCenter.z = (float)(minCellIdZ);
minCellCenter = minCellCenter * cellSize;
minCellCenter = minCellCenter + minBound + gridCellSizeHALF;
float3 cellCenter;
cellCenter.z = minCellCenter.z - cellSize.z;
for (int z = minCellIdZ; z < maxCellIdP1Z; ++z)
{
cellCenter.z += cellSize.z;
cellCenter.y = minCellCenter.y - cellSize.y;
for (int y = minCellIdY; y < maxCellIdP1Y; ++y)
{
cellCenter.y += cellSize.y;
cellCenter.x = minCellCenter.x - cellSize.x;
for (int x = minCellIdX; x < maxCellIdP1X; ++x, ++nextSlot)
{
cellCenter.x += cellSize.x;
//////////////////////////////////////////////////////////////////////////
//coordinate transform origin -> cellCenter
const float3 v0 = triangle.vtx[0] - cellCenter;
const float3 v1 = triangle.vtx[1] - cellCenter;
const float3 v2 = triangle.vtx[2] - cellCenter;
const float3 e0 = v1 - v0;
const float3 e1 = v2 - v1;
const float3 e2 = v0 - v2;
bool passedAllTests = true;
//////////////////////////////////////////////////////////////////////////
//Plane/box overlap test
float3 vmin, vmax;
vmin.x = (normal.x > 0.f) ? -gridCellSizeHALF.x : gridCellSizeHALF.x;
vmin.y = (normal.y > 0.f) ? -gridCellSizeHALF.y : gridCellSizeHALF.y;
vmin.z = (normal.z > 0.f) ? -gridCellSizeHALF.z : gridCellSizeHALF.z;
vmax = -vmin;
vmax = vmax - v0;
vmin = vmin - v0;
passedAllTests = passedAllTests && dot(normal, vmin) <= 0.f && dot(normal, vmax) > 0.f;
//Note: early exit here makes the code slower (CUDA 7.5, GTX 970)
//////////////////////////////////////////////////////////////////////////
//9 tests for separating axis
float3 fe;
fe.x = fabsf(e0.x);
fe.y = fabsf(e0.y);
fe.z = fabsf(e0.z);
passedAllTests = passedAllTests && AXISTEST_X01(e0, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y02(e0, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z12(e0, fe, v0, v1, v2, gridCellSizeHALF);
fe.x = fabsf(e1.x);
fe.y = fabsf(e1.y);
fe.z = fabsf(e1.z);
passedAllTests = passedAllTests && AXISTEST_X01(e1, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y02(e1, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z0(e1, fe, v0, v1, v2, gridCellSizeHALF);
fe.x = fabsf(e2.x);
fe.y = fabsf(e2.y);
fe.z = fabsf(e2.z);
passedAllTests = passedAllTests && AXISTEST_X2(e2, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Y1(e2, fe, v0, v1, v2, gridCellSizeHALF);
passedAllTests = passedAllTests && AXISTEST_Z12(e2, fe, v0, v1, v2, gridCellSizeHALF);
if (!passedAllTests)
{
outKeys[nextSlot] = (uint)(gridRes[0] * gridRes[1] * gridRes[2]);
outValues[nextSlot] = (uint)triangleId;
continue;
}
outKeys[nextSlot] = x + y * gridRes[0] + z * (gridRes[0] * gridRes[1]);
outValues[nextSlot] = (uint)triangleId;
}//end for x
}//end for y
}//end for z
}
};
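// After sorting the fragments by cell key, CellExtractor walks consecutive key pairs and,
// wherever the key changes, closes the [begin, end) reference range of one cell and opens
// the range of the next. Fragments tagged with the out-of-range sentinel key are ignored,
// and cells that received no fragments keep their zero-initialized empty range.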
class CellExtractor
{
public:
int gridRes[3];
uint* cells_ptr;
CellExtractor(
int aGridResX, int aGridResY, int aGridResZ,
uint2* aCellsPtr)
{
gridRes[0] = aGridResX;
gridRes[1] = aGridResY;
gridRes[2] = aGridResZ;
cells_ptr = (uint*)aCellsPtr;
}
template <typename Tuple>
__host__ __device__ void operator()(Tuple t)
{
const unsigned int myCellIndex = thrust::get<0>(t);
const unsigned int nextCellIndex = thrust::get<1>(t);
const size_t myId = thrust::get<2>(t);
if (myCellIndex >= (unsigned int)gridRes[0] * gridRes[1] * gridRes[2])
return;
if (myCellIndex != nextCellIndex)
{
//end of range for the cell at myCellIndex
cells_ptr[2u * myCellIndex + 1u] = (unsigned int)myId + 1u;
//start of range for the cell at nextCellIndex
if (nextCellIndex < (unsigned int)gridRes[0] * gridRes[1] * gridRes[2])
cells_ptr[2u * nextCellIndex] = (unsigned int)myId + 1u;
}
}
};
__host__ UniformGrid UniformGridSortBuilder::build(WFObject & aGeometry, const int aResX, const int aResY, const int aResZ)
{
cudastd::timer timer;
UniformGrid oGrid;
//initialize grid resolution
oGrid.res[0] = thrust::max<int>(aResX, 1);
oGrid.res[1] = thrust::max<int>(aResY, 1);
oGrid.res[2] = thrust::max<int>(aResZ, 1);
//allocate grid cells
oGrid.cells = thrust::device_new<uint2>(oGrid.res[0] * oGrid.res[1] * oGrid.res[2]);
//initialize empty cells
thrust::device_ptr<uint2> dev_ptr_uint2 = oGrid.cells;
uint2 * raw_ptr_uint2 = thrust::raw_pointer_cast(dev_ptr_uint2);
uint * raw_ptr_uint = (uint*)raw_ptr_uint2;
thrust::device_ptr<uint> dev_ptr_uint(raw_ptr_uint);
thrust::fill(dev_ptr_uint, dev_ptr_uint + 2 * oGrid.res[0] * oGrid.res[1] * oGrid.res[2], 0u);
//compute vertex index buffer for the triangles
thrust::host_vector<uint3> host_indices(aGeometry.faces.size());
for (size_t i = 0; i < aGeometry.faces.size(); i++)
{
host_indices[i].x = (unsigned int)aGeometry.faces[i].vert1;
host_indices[i].y = (unsigned int)aGeometry.faces[i].vert2;
host_indices[i].z = (unsigned int)aGeometry.faces[i].vert3;
}
//copy the vertex index buffer to the device
thrust::device_vector<uint3> device_indices(aGeometry.faces.size());
thrust::copy(host_indices.begin(), host_indices.end(), device_indices.begin());
//copy the vertex buffer to the device
thrust::device_vector<float3> device_vertices(aGeometry.vertices.begin(), aGeometry.vertices.end());
//compute scene bounding box
oGrid.vtx[0] = thrust::reduce(device_vertices.begin(), device_vertices.end(), make_float3( FLT_MAX, FLT_MAX, FLT_MAX), binary_float3_min());
oGrid.vtx[1] = thrust::reduce(device_vertices.begin(), device_vertices.end(), make_float3(-FLT_MAX, -FLT_MAX,- FLT_MAX), binary_float3_max());
const float boundsDiagonal = len(oGrid.vtx[1] - oGrid.vtx[0]);
const float myEpsilon = boundsDiagonal * 0.0005773f;//0.57735026918962576450914878050196 = 1.0 / sqrt(3.0)
//extend scene bounding box with epsilon in each dimension
oGrid.vtx[0] -= make_float3(myEpsilon, myEpsilon, myEpsilon);
oGrid.vtx[1] += make_float3(myEpsilon, myEpsilon, myEpsilon);
//count triangle-cell intersections
thrust::device_vector<unsigned int> fragment_counts(device_indices.size() + 1, 0u);
FragmentCounter frag_count(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
oGrid.vtx[0],
oGrid.getCellSize(),
oGrid.getCellSizeRCP(),
device_vertices.data()
);
thrust::transform(device_indices.begin(), device_indices.end(), fragment_counts.begin(), frag_count);
thrust::exclusive_scan(fragment_counts.begin(), fragment_counts.end(), fragment_counts.begin());
//#ifdef _DEBUG
// outputDeviceVector("Scanned counts: ", fragment_counts);
//#endif
size_t num_fragments = fragment_counts[device_indices.size()];
//allocate cell index and triangle index buffers
thrust::device_vector<uint> fragment_keys(num_fragments);
oGrid.primitives = thrust::device_new<uint> (num_fragments);//fragment_vals
oGrid.numRefs = (unsigned)num_fragments;
//write triangle-cell pairs
FragmentWriter frag_write(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
oGrid.vtx[0],
oGrid.getCellSize(),
oGrid.getCellSizeRCP(),
device_vertices.data(),
fragment_keys.data(),
oGrid.primitives
);
thrust::counting_iterator<size_t> first(0u);
thrust::counting_iterator<size_t> last(device_indices.size());
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(device_indices.begin(), fragment_counts.begin(), first)),
thrust::make_zip_iterator(thrust::make_tuple(device_indices.end(), fragment_counts.end() - 1u, last)),
frag_write);
//sort the pairs
thrust::sort_by_key(fragment_keys.begin(), fragment_keys.end(), oGrid.primitives);
//#ifdef _DEBUG
// outputDeviceVector("sorted keys: ", fragment_keys);
// outputDeviceVector("sorted vals: ", oGrid.primitives);
//#endif
//initialize the grid cells
CellExtractor extract_ranges(
oGrid.res[0], oGrid.res[1], oGrid.res[2],
thrust::raw_pointer_cast(oGrid.cells)
);
thrust::counting_iterator<size_t> first_pair(0u);
thrust::counting_iterator<size_t> last_pair(num_fragments - 1);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(fragment_keys.begin(), fragment_keys.begin() + 1, first_pair)),
thrust::make_zip_iterator(thrust::make_tuple(fragment_keys.end() - 1, fragment_keys.end(), last_pair)),
extract_ranges);
//#ifdef _DEBUG
// thrust::device_vector<unsigned int> cells_x(oGrid.cells.size());
// thrust::device_vector<unsigned int> cells_y(oGrid.cells.size());
// thrust::transform(oGrid.cells.begin(), oGrid.cells.end(), cells_x.begin(), uint2_get_x());
// thrust::transform(oGrid.cells.begin(), oGrid.cells.end(), cells_y.begin(), uint2_get_y());
// outputDeviceVector("Grid cells x: ", cells_x);
// outputDeviceVector("Grid cells y: ", cells_y);
//#endif
totalTime = timer.get();
timer.cleanup();
resX = aResX;
resY = aResY;
resZ = aResZ;
return oGrid;
}
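// Example driver (sketch only; assumes a WFObject loaded from disk elsewhere and a
// default-constructible builder):
//   UniformGridSortBuilder builder;
//   UniformGrid grid = builder.build(scene, 64, 64, 64);
//   if (builder.test(grid, scene) == 0)
//       builder.stats();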
__host__ int UniformGridSortBuilder::test(UniformGrid& aGrid, WFObject & aGeometry)
{
const size_t numCells = (size_t)(aGrid.res[0] * aGrid.res[1] * aGrid.res[2]);
thrust::host_vector<uint2> host_cells(numCells);
thrust::copy(aGrid.cells, aGrid.cells + numCells, host_cells.begin());
thrust::host_vector<unsigned int> host_primitives(aGrid.numRefs);
thrust::copy(aGrid.primitives, aGrid.primitives + aGrid.numRefs, host_primitives.begin());
for (int z = 0; z < aGrid.res[2]; ++z)
{
for (int y = 0; y < aGrid.res[1]; ++y)
{
for (int x = 0; x < aGrid.res[0]; ++x)
{
uint2 cell = host_cells[x + y * aGrid.res[0] + z * aGrid.res[0] * aGrid.res[1]];
if (cell.x > cell.y)
{
std::cerr << "Grid cell (" << x << ", " << y << ", " << z << ") has invalid range (" << cell.x << ", " << cell.y << ")\n";
return 1;
}
for (size_t refId = cell.x; refId < cell.y; ++refId)
{
if (refId >= host_primitives.size())
{
std::cerr << "Ivalid primitive reference " << refId << " in cell (" << x << ", " << y << ", " << z << ")\n";
return 2;
}
unsigned int triId = host_primitives[refId];
if (triId >= aGeometry.faces.size())
{
std::cerr << "Ivalid primitive index " << triId << " in cell (" << x << ", " << y << ", " << z << ")\n";
return 3;
}
Triangle prim;
prim.vtx[0] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert1];
prim.vtx[1] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert2];
prim.vtx[2] = aGeometry.vertices[(unsigned int)aGeometry.faces[triId].vert3];
BBox bounds = BBoxExtractor<Triangle>::get(prim);
float3 minCellIdf = (bounds.vtx[0] - aGrid.vtx[0] - aGrid.getCellSize() * 0.001f) * aGrid.getCellSizeRCP();
const float3 maxCellIdPlus1f = (bounds.vtx[1] - aGrid.vtx[0] + aGrid.getCellSize() * 0.001f) * aGrid.getCellSizeRCP() + rep(1.f);
const int minCellIdX = std::max(0, (int)(minCellIdf.x));
const int minCellIdY = std::max(0, (int)(minCellIdf.y));
const int minCellIdZ = std::max(0, (int)(minCellIdf.z));
const int maxCellIdP1X = std::min(aGrid.res[0], (int)(maxCellIdPlus1f.x));
const int maxCellIdP1Y = std::min(aGrid.res[1], (int)(maxCellIdPlus1f.y));
const int maxCellIdP1Z = std::min(aGrid.res[2], (int)(maxCellIdPlus1f.z));
if (x < minCellIdX || maxCellIdP1X <= x ||
y < minCellIdY || maxCellIdP1Y <= y ||
z < minCellIdZ || maxCellIdP1Z <= z )
{
std::cerr << "Primitive " << triId << " inserted in wrong cell (" << x << ", " << y << ", " << z << ")\n";
return 4;
}
}
}
}
}
for (size_t primId = 0; primId < aGeometry.faces.size(); ++primId)
{
bool inserted = false;
for (size_t refId = 0; refId < host_primitives.size(); ++refId)
{
if (host_primitives[refId] == primId)
{
inserted = true;
break;
}
}
if (!inserted)
{
std::cerr << "Primitive " << primId << " not inserted in the grid!\n";
return 5;
}
}
return 0;
}
__host__ void UniformGridSortBuilder::stats()
{
std::cerr << "[" << resX << " x " << resY << " x " << resZ << "] grid build in " << totalTime << "ms\n";
}
|
29163c08310317f06d0373355c1d1b561cdd50ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cuda_neural_net(float *Weights_D, int num_per_sweeper, int num_per_layer, int num_per_input, int num_per_output, int num_weights, int num_layers, float response, float *inputs_d, float *outputs_d)
{
extern __shared__ float buffer[];
int start_of_weights = blockIdx.x * num_weights;
int start_of_hidden_layers = start_of_weights + (num_per_input * num_per_layer);
//input layer
buffer[threadIdx.x] = 0;
for (int i = 0; i < num_per_input; ++i)
{
buffer[threadIdx.x] += inputs_d[(blockIdx.x * num_per_input) + i] * Weights_D[start_of_weights + (threadIdx.x * num_per_input) + i];
}
buffer[threadIdx.x] = 1.0 / (1.0 + exp(-buffer[threadIdx.x] / response));
__syncthreads();
//subsequent hidden layers
float temp;
for (int i = 0; i < num_layers; ++i)
{
temp = 0;
for (int j = 0; j < num_per_layer; ++j)
{
temp += buffer[j] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * i) + (num_per_layer * threadIdx.x) + j];
}
temp = 1.0 / (1.0 + exp(-temp / response));
__syncthreads();
buffer[threadIdx.x] = temp;
__syncthreads();
}
//output layer
if (threadIdx.x < num_per_output)
{
temp = 0;
for (int i = 0; i < num_per_layer; ++i)
{
temp += buffer[i] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * num_layers) + (num_per_layer * threadIdx.x) + i];
}
temp = 1.0 / (1.0 + exp(-temp / response));
__syncthreads();
//copy the result back out to the outputs vector
outputs_d[(blockIdx.x * num_per_output) + threadIdx.x] = temp;
}
} | 29163c08310317f06d0373355c1d1b561cdd50ca.cu | #include "includes.h"
__global__ void cuda_neural_net(float *Weights_D, int num_per_sweeper, int num_per_layer, int num_per_input, int num_per_output, int num_weights, int num_layers, float response, float *inputs_d, float *outputs_d)
{
extern __shared__ float buffer[];
int start_of_weights = blockIdx.x * num_weights;
int start_of_hidden_layers = start_of_weights + (num_per_input * num_per_layer);
//input layer
buffer[threadIdx.x] = 0;
for (int i = 0; i < num_per_input; ++i)
{
buffer[threadIdx.x] += inputs_d[(blockIdx.x * num_per_input) + i] * Weights_D[start_of_weights + (threadIdx.x * num_per_input) + i];
}
buffer[threadIdx.x] = 1.0 / (1.0 + exp(-buffer[threadIdx.x] / response));
__syncthreads();
//subsequent hidden layers
float temp;
for (int i = 0; i < num_layers; ++i)
{
temp = 0;
for (int j = 0; j < num_per_layer; ++j)
{
temp += buffer[j] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * i) + (num_per_layer * threadIdx.x) + j];
}
temp = 1.0 / (1.0 + exp(-temp / response));
__syncthreads();
buffer[threadIdx.x] = temp;
__syncthreads();
}
//output layer
if (threadIdx.x < num_per_output)
{
temp = 0;
for (int i = 0; i < num_per_layer; ++i)
{
temp += buffer[i] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * num_layers) + (num_per_layer * threadIdx.x) + i];
}
temp = 1.0 / (1.0 + exp(-temp / response));
__syncthreads();
//copy the result back out to the outputs vector
outputs_d[(blockIdx.x * num_per_output) + threadIdx.x] = temp;
}
} |
eea0ef524980965b2b07d979135b8e525826764f.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_->begin(), data_d_->end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(hipMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
hipMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(hipMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
hipMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
hipMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
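// Access-state protocol encoded by gpu_access_:
// kNone  - the host owns the data and may read and write; the device may not touch it.
// kRead  - host and device both hold a valid copy and may read it.
// kWrite - the device owns the data; host access first requires a copy back.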
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
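// Typical lazy-sync flow (sketch; relies on the declarations in xgboost/host_device_vector.h):
//   HostDeviceVector<bst_float> vec(std::vector<bst_float>{1.f, 2.f, 3.f}, -1); // host only
//   vec.SetDevice(0);                       // pick a device, no copy yet
//   auto span = vec.DeviceSpan();           // copies host -> device, device gains write access
//   const auto& h = vec.ConstHostVector();  // copies device -> host, downgrades to read access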
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
template class HostDeviceVector<RTreeNodeStat>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
| eea0ef524980965b2b07d979135b8e525826764f.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_->begin(), data_d_->end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(cudaMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
cudaMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(cudaMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
cudaMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
cudaMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
template class HostDeviceVector<RTreeNodeStat>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
|
a9170f20ef66a1619c551e83f037bd472c0f5911.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdio.h>
#include <helper_math.h>
#define TPB 512
__global__
void centroidKernel(const uchar4 *d_img, int *d_centroidCol,
int *d_centroidRow, int *d_pixelCount,
int width, int height){
__shared__ uint4 s_img[TPB];
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int s_idx = threadIdx.x;
const int row = idx / width;
const int col = idx - row*width;
// check idx first so threads of the last, partially filled block never read past the image
if((idx < width*height) && (d_img[idx].x < 255 || d_img[idx].y < 255 ||
d_img[idx].z < 255)){
s_img[s_idx].x = col;
s_img[s_idx].y = row;
s_img[s_idx].z = 1;
}
else{
s_img[s_idx].x = 0;
s_img[s_idx].y = 0;
s_img[s_idx].z = 0;
}
__syncthreads();
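// Block-wide tree reduction of the (col, row, count) partial sums into s_img[0].
// The disabled branch below is the interleaved-addressing variant kept for reference;
// the active branch uses sequential addressing, which keeps the working threads contiguous.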
#if 0
for(int s = 1; s < blockDim.x; s *= 2){
int index = 2*s*s_idx;
if(index < blockDim.x){
s_img[index] += s_img[index+s];
}
__syncthreads();
}
#else
for(int s = blockDim.x / 2; s > 0; s >>= 1){
if(s_idx < s){
s_img[s_idx] += s_img[s_idx + s];
}
__syncthreads();
}
#endif
if(s_idx == 0){
atomicAdd(d_centroidCol, s_img[0].x);
atomicAdd(d_centroidRow, s_img[0].y);
atomicAdd(d_pixelCount, s_img[0].z);
}
}
void centroidParallel(uchar4 *img, int width, int height){
uchar4 *d_img = 0;
int *d_centroidRow = 0, *d_centroidCol = 0, *d_pixelCount = 0;
int centroidRow = 0, centroidCol = 0, pixelCount = 0;
hipMalloc(&d_img, width*height*sizeof(uchar4));
hipMemcpy(d_img, img, width*height*sizeof(uchar4), hipMemcpyHostToDevice);
hipMalloc(&d_centroidRow, sizeof(int));
hipMalloc(&d_centroidCol, sizeof(int));
hipMalloc(&d_pixelCount, sizeof(int));
hipMemset(d_centroidRow, 0, sizeof(int));
hipMemset(d_centroidCol, 0, sizeof(int));
hipMemset(d_pixelCount, 0, sizeof(int));
hipLaunchKernelGGL(( centroidKernel), dim3((width*height + TPB - 1) / TPB), dim3(TPB) , 0, 0, d_img,
d_centroidCol, d_centroidRow, d_pixelCount, width, height);
hipMemcpy(¢roidRow, d_centroidRow, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(¢roidCol, d_centroidCol, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&pixelCount, d_pixelCount, sizeof(int), hipMemcpyDeviceToHost);
centroidCol /= pixelCount;
centroidRow /= pixelCount;
printf("Centroid: {col = %d, row = %d} based on %d pixels\n",
centroidCol, centroidRow, pixelCount);
for(int col = 0; col < width; ++col){
img[centroidRow*width + col].x = 255;
img[centroidRow*width + col].y = 0;
img[centroidRow*width + col].z = 0;
}
for(int row = 0; row < height; ++row){
img[row*width + centroidCol].x = 255;
img[row*width + centroidCol].y = 0;
img[row*width + centroidCol].z = 0;
}
hipFree(d_img);
hipFree(d_centroidRow);
hipFree(d_centroidCol);
hipFree(d_pixelCount);
}
| a9170f20ef66a1619c551e83f037bd472c0f5911.cu | #include "kernel.h"
#include <stdio.h>
#include <helper_math.h>
#define TPB 512
__global__
void centroidKernel(const uchar4 *d_img, int *d_centroidCol,
int *d_centroidRow, int *d_pixelCount,
int width, int height){
__shared__ uint4 s_img[TPB];
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int s_idx = threadIdx.x;
const int row = idx / width;
const int col = idx - row*width;
// check idx first so threads of the last, partially filled block never read past the image
if((idx < width*height) && (d_img[idx].x < 255 || d_img[idx].y < 255 ||
d_img[idx].z < 255)){
s_img[s_idx].x = col;
s_img[s_idx].y = row;
s_img[s_idx].z = 1;
}
else{
s_img[s_idx].x = 0;
s_img[s_idx].y = 0;
s_img[s_idx].z = 0;
}
__syncthreads();
#if 0
for(int s = 1; s < blockDim.x; s *= 2){
int index = 2*s*s_idx;
if(index < blockDim.x){
s_img[index] += s_img[index+s];
}
__syncthreads();
}
#else
for(int s = blockDim.x / 2; s > 0; s >>= 1){
if(s_idx < s){
s_img[s_idx] += s_img[s_idx + s];
}
__syncthreads();
}
#endif
if(s_idx == 0){
atomicAdd(d_centroidCol, s_img[0].x);
atomicAdd(d_centroidRow, s_img[0].y);
atomicAdd(d_pixelCount, s_img[0].z);
}
}
void centroidParallel(uchar4 *img, int width, int height){
uchar4 *d_img = 0;
int *d_centroidRow = 0, *d_centroidCol = 0, *d_pixelCount = 0;
int centroidRow = 0, centroidCol = 0, pixelCount = 0;
cudaMalloc(&d_img, width*height*sizeof(uchar4));
cudaMemcpy(d_img, img, width*height*sizeof(uchar4), cudaMemcpyHostToDevice);
cudaMalloc(&d_centroidRow, sizeof(int));
cudaMalloc(&d_centroidCol, sizeof(int));
cudaMalloc(&d_pixelCount, sizeof(int));
cudaMemset(d_centroidRow, 0, sizeof(int));
cudaMemset(d_centroidCol, 0, sizeof(int));
cudaMemset(d_pixelCount, 0, sizeof(int));
centroidKernel<<< (width*height + TPB - 1) / TPB, TPB >>>(d_img,
d_centroidCol, d_centroidRow, d_pixelCount, width, height);
cudaMemcpy(¢roidRow, d_centroidRow, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(¢roidCol, d_centroidCol, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&pixelCount, d_pixelCount, sizeof(int), cudaMemcpyDeviceToHost);
centroidCol /= pixelCount;
centroidRow /= pixelCount;
printf("Centroid: {col = %d, row = %d} based on %d pixels\n",
centroidCol, centroidRow, pixelCount);
for(int col = 0; col < width; ++col){
img[centroidRow*width + col].x = 255;
img[centroidRow*width + col].y = 0;
img[centroidRow*width + col].z = 0;
}
for(int row = 0; row < height; ++row){
img[row*width + centroidCol].x = 255;
img[row*width + centroidCol].y = 0;
img[row*width + centroidCol].z = 0;
}
cudaFree(d_img);
cudaFree(d_centroidRow);
cudaFree(d_centroidCol);
cudaFree(d_pixelCount);
}
|
088c33b1643cb8e440a1590dc769503bb3b11354.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void teste (int *dev_a)
{
__shared__ int a[10];
a[threadIdx.x] = threadIdx.x;
__syncthreads();
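// The barrier guarantees every slot of a[] is written before any thread reads its neighbour's entry.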
printf("[%d] %d\n", threadIdx.x, a[(threadIdx.x+1)%10] );
}
int main(int argc, char const *argv[])
{
int *dev_a;
// int *host_a;
hipMalloc((void**) &dev_a, 10 * sizeof(int));
hipLaunchKernelGGL(( teste) , dim3(1),dim3(10), 0, 0, dev_a);
// host_a = (int*) malloc (2 * sizeof(int));
// hipMemcpy( host_a, dev_a, 2 * sizeof(int), hipMemcpyDeviceToHost);
// for (int i = 0; i < 2; ++i)
// {
// printf("%d %d\n", i, host_a[i] );
// }
hipDeviceSynchronize();
return 0;
}
| 088c33b1643cb8e440a1590dc769503bb3b11354.cu |
#include <stdio.h>
__global__ void teste (int *dev_a)
{
__shared__ int a[10];
a[threadIdx.x] = threadIdx.x;
__syncthreads();
printf("[%d] %d\n", threadIdx.x, a[(threadIdx.x+1)%10] );
}
int main(int argc, char const *argv[])
{
int *dev_a;
// int *host_a;
cudaMalloc((void**) &dev_a, 10 * sizeof(int));
teste <<<1,10>>> (dev_a);
// host_a = (int*) malloc (2 * sizeof(int));
// cudaMemcpy( host_a, dev_a, 2 * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < 2; ++i)
// {
// printf("%d %d\n", i, host_a[i] );
// }
cudaDeviceSynchronize();
return 0;
}
|
7accb1bc0097769e9e6cd558897da009aea4c9e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <climits> // INT_MAX used for the tile-size sentinel in main()
#include "../headers/graph.h"
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
__global__
void AloopFW_inner(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m, int k){
int row_offset = blockIdx.x*blockDim.x + threadIdx.x;
int col_offset = blockIdx.y*blockDim.y + threadIdx.y;
if (row_offset >= m || col_offset >= m) return; // guard threads of the last block that fall outside the m x m tile
int sum = d_x[u_row_st + row_offset][u_col_st + k] + d_x[v_row_st + k][v_col_st + col_offset];
if(d_x[x_row_st + row_offset][x_col_st + col_offset] > sum)
d_x[x_row_st + row_offset][x_col_st + col_offset] = sum;
/*
int rowsPerThread = m / blockDim.x;
int colsPerThread = m / blockDim.y;
int r_offset_start = threadIdx.x * rowsPerThread;
int r_offset_end = r_offset_start + rowsPerThread - 1;
int c_offset_start = threadIdx.y * colsPerThread;
int c_offset_end = c_offset_start + colsPerThread - 1;
for(int i = r_offset_start; i <= r_offset_end; i++){
for(int j = c_offset_start; j <= c_offset_end; j++){
int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
if(d_x[x_row_st + i][x_col_st + j] > sum)
d_x[x_row_st + i][x_col_st + j] = sum;
}
}
*/
}
//Called from host (outermost for loop)
void AloopFW_outer(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m){
int k;
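// Floyd-Warshall's k loop carries a dependency from one iteration to the next, so it stays on
// the host and one kernel is launched per value of k; back-to-back launches on the default
// stream already execute in order.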
for(k = 0; k < m; k++){
int threadX = min(m, THREADS_PER_BLOCK_X);
int threadY = min(m, THREADS_PER_BLOCK_Y);
int blocksX = m % threadX == 0 ? m/threadX : m/threadX + 1;
int blocksY = m % threadY == 0 ? m/threadY : m/threadY + 1;
dim3 blocksPerGrid(blocksX, blocksY);
dim3 threadsPerBlock(threadX, threadY);
hipLaunchKernelGGL(( AloopFW_inner), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m, k);
}
}
__global__
void DloopFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m){
/*
int kPerThread = m / blockDim.z;
int rowsPerThread = m / blockDim.x;
int colsPerThread = m / blockDim.y;
int k_offset_start = threadIdx.z * kPerThread;
int k_offset_end = k_offset_start + kPerThread - 1;
int r_offset_start = threadIdx.x * rowsPerThread;
int r_offset_end = r_offset_start + rowsPerThread - 1;
int c_offset_start = threadIdx.y * colsPerThread;
int c_offset_end = c_offset_start + colsPerThread - 1;
for(int k = k_offset_start; k <= k_offset_end; k++){
for(int i = r_offset_start; i <= r_offset_end; i++){
for(int j = c_offset_start; j <= c_offset_end; j++){
int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
if(d_x[x_row_st + i][x_col_st + j] > sum)
d_x[x_row_st + i][x_col_st + j] = sum;
}
}
}
*/
int k = blockIdx.z*blockDim.z + threadIdx.z;
int row_offset = blockIdx.x*blockDim.x + threadIdx.x;
int col_offset = blockIdx.y*blockDim.y + threadIdx.y;
if (k >= m || row_offset >= m || col_offset >= m) return; // guard threads that fall outside the m x m x m iteration space
int sum = d_x[u_row_st + row_offset][u_col_st + k] + d_x[v_row_st + k][v_col_st + col_offset];
if(d_x[x_row_st + row_offset][x_col_st + col_offset] > sum)
d_x[x_row_st + row_offset][x_col_st + col_offset] = sum;
}
//Recursive-3 implementation in HW1
void DFW(int ** x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int m){
if(m > n)
return;
if(n == m){
/*
int threadZ = 1;
int threadX = min(m, THREADS_PER_BLOCK_X);
int threadY = min(m, THREADS_PER_BLOCK_Y);
int blockZ = m;
int blockX = m % threadX == 0 ? m/threadX : m/threadX + 1;
int blockY = m % threadY == 0 ? m/threadY : m/threadY + 1;
dim3 blocksPerGrid(blockX, blockY, blockZ);
dim3 threadsPerBlock(threadX, threadY, threadZ);
DloopFW<<<blocksPerGrid, threadsPerBlock>>>(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m);
*/
AloopFW_outer(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m);
}
else{
int mid = n/2;
//DFW (X11, U11, V11)
DFW(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, mid, m);
//DFW (X12, U11, V12)
DFW(x, x_row_st, x_col_st + mid, u_row_st, u_col_st, v_row_st, v_col_st + mid, mid, m);
//DFW (X21, U21, V11)
DFW(x, x_row_st + mid, x_col_st, u_row_st + mid, u_col_st, v_row_st, v_col_st, mid, m);
//DFW (X22, U21, V12)
DFW(x, x_row_st + mid, x_col_st + mid, u_row_st + mid, u_col_st, v_row_st, v_col_st + mid, mid, m);
//DFW (X11, U12, V21)
DFW(x, x_row_st, x_col_st, u_row_st, u_col_st + mid, v_row_st + mid, v_col_st, mid, m);
//DFW (X12, U12, V22)
DFW(x, x_row_st, x_col_st + mid, u_row_st, u_col_st + mid, v_row_st + mid, v_col_st + mid, mid, m);
//DFW (X21, U22, V21)
DFW(x, x_row_st + mid, x_col_st, u_row_st + mid, u_col_st + mid, v_row_st + mid, v_col_st, mid, m);
//DFW (X22, U22, V22)
DFW(x, x_row_st + mid, x_col_st + mid, u_row_st + mid, u_col_st + mid, v_row_st + mid, v_col_st + mid, mid, m);
}
}
void DFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
void CFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
void BFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
//Figure 4 implementation : HW 5
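// r-way divide and conquer: each level splits the matrix into an r x r grid of blocks.
// Round k first updates the diagonal block (AFW), then the remaining blocks of row k and
// column k (BFW/CFW), and finally all other blocks (DFW), with a device-wide
// synchronization between the phases.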
void AFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else{
int sub_size = n / r;
for(int k = 0; k < r; k++){
int offset = k*sub_size;
AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
//SYNC POINT
hipDeviceSynchronize();
for(int j = 0; j < r; j++){
if(j == k)
continue;
BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
}
hipDeviceSynchronize();
for(int i = 0; i < r; i++){
if(i == k)
continue;
for(int j = 0; j < r; j++){
if(j == k)
continue;
DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
}
}
}
}
}//AFW
int ** copy_matrix_to_host(int ** dev_matrix, int n){
int ** new_matrix = new int*[n+1];
for(int i=1;i <= n; i++){
new_matrix[i] = new int[n+1];
int * begin;
hipMemcpy(&begin, &dev_matrix[i], sizeof (int *), hipMemcpyDeviceToHost);
hipMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), hipMemcpyDeviceToHost);
}
return new_matrix;
}
int ** copy_matrix_to_device(int ** host_matrix, int n){
//int ** dev_matrix = new int*[n+1];
int ** dev_matrix;
hipError_t err = hipMalloc(&dev_matrix, (n+1) * sizeof(int *));
if(err != hipSuccess){
printf("Error allocating memory on device.");
return NULL;
}
for(int i = 1; i <= n; i++){
//printf("%x\n", &addr[i]);
int * start;
err = hipMalloc(&start, (n+1)*sizeof(int));
if(err != hipSuccess){
printf("Error allocating memory on device.");
return NULL;
}
hipMemcpy(dev_matrix+i, &start, sizeof(int *), hipMemcpyHostToDevice);
hipMemcpy(start, host_matrix[i], (n+1) * sizeof(int), hipMemcpyHostToDevice);
}
return dev_matrix;
}
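// Matrices are 1-indexed here: rows and columns run from 1 to n, and index 0 of every array is unused.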
int main(int argc, char * argv[])
{
//Matrix
int n = atoi(argv[1]);
int ** matrix = generate_matrix(n);
int ** dev_matrix = copy_matrix_to_device(matrix, n);
if(dev_matrix == NULL)
return 0;
// fw_iterative_outer(dev_matrix, n);
if(n <= 32){
printf("Original matrix: \n");
print_matrix(matrix, n);
}
long long start, end;
start = clock();
int tilesize[2] = {4, INT_MAX};
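// Two-level tiling: depth 0 splits the matrix into a 4 x 4 grid of blocks, and INT_MAX at
// depth 1 makes the next level fall straight through to the GPU base case.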
AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize);
end = clock();
int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
if(n <= 32){
printf("\nWith updated distances: \n");
print_matrix(new_matrix, n);
}
cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl;
return 0;
}
| 7accb1bc0097769e9e6cd558897da009aea4c9e6.cu | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <climits> // INT_MAX used for the tile-size sentinel in main()
#include "../headers/graph.h"
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
#define MAX_THREADS_PER_BLOCK 1024
using namespace std;
__global__
void AloopFW_inner(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m, int k){
int row_offset = blockIdx.x*blockDim.x + threadIdx.x;
int col_offset = blockIdx.y*blockDim.y + threadIdx.y;
if (row_offset >= m || col_offset >= m) return; // guard threads of the last block that fall outside the m x m tile
int sum = d_x[u_row_st + row_offset][u_col_st + k] + d_x[v_row_st + k][v_col_st + col_offset];
if(d_x[x_row_st + row_offset][x_col_st + col_offset] > sum)
d_x[x_row_st + row_offset][x_col_st + col_offset] = sum;
/*
int rowsPerThread = m / blockDim.x;
int colsPerThread = m / blockDim.y;
int r_offset_start = threadIdx.x * rowsPerThread;
int r_offset_end = r_offset_start + rowsPerThread - 1;
int c_offset_start = threadIdx.y * colsPerThread;
int c_offset_end = c_offset_start + colsPerThread - 1;
for(int i = r_offset_start; i <= r_offset_end; i++){
for(int j = c_offset_start; j <= c_offset_end; j++){
int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
if(d_x[x_row_st + i][x_col_st + j] > sum)
d_x[x_row_st + i][x_col_st + j] = sum;
}
}
*/
}
//Called from host (outermost for loop)
void AloopFW_outer(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m){
int k;
for(k = 0; k < m; k++){
int threadX = min(m, THREADS_PER_BLOCK_X);
int threadY = min(m, THREADS_PER_BLOCK_Y);
int blocksX = m % threadX == 0 ? m/threadX : m/threadX + 1;
int blocksY = m % threadY == 0 ? m/threadY : m/threadY + 1;
dim3 blocksPerGrid(blocksX, blocksY);
dim3 threadsPerBlock(threadX, threadY);
AloopFW_inner<<<blocksPerGrid, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m, k);
}
}
__global__
void DloopFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int m){
/*
int kPerThread = m / blockDim.z;
int rowsPerThread = m / blockDim.x;
int colsPerThread = m / blockDim.y;
int k_offset_start = threadIdx.z * kPerThread;
int k_offset_end = k_offset_start + kPerThread - 1;
int r_offset_start = threadIdx.x * rowsPerThread;
int r_offset_end = r_offset_start + rowsPerThread - 1;
int c_offset_start = threadIdx.y * colsPerThread;
int c_offset_end = c_offset_start + colsPerThread - 1;
for(int k = k_offset_start; k <= k_offset_end; k++){
for(int i = r_offset_start; i <= r_offset_end; i++){
for(int j = c_offset_start; j <= c_offset_end; j++){
int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
if(d_x[x_row_st + i][x_col_st + j] > sum)
d_x[x_row_st + i][x_col_st + j] = sum;
}
}
}
*/
int k = blockIdx.z*blockDim.z + threadIdx.z;
int row_offset = blockIdx.x*blockDim.x + threadIdx.x;
int col_offset = blockIdx.y*blockDim.y + threadIdx.y;
int sum = d_x[u_row_st + row_offset][u_col_st + k] + d_x[v_row_st + k][v_col_st + col_offset];
if(d_x[x_row_st + row_offset][x_col_st + col_offset] > sum)
d_x[x_row_st + row_offset][x_col_st + col_offset] = sum;
}
//Recursive-3 implementation in HW1
void DFW(int ** x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int m){
if(m > n)
return;
if(n == m){
/*
int threadZ = 1;
int threadX = min(m, THREADS_PER_BLOCK_X);
int threadY = min(m, THREADS_PER_BLOCK_Y);
int blockZ = m;
int blockX = m % threadX == 0 ? m/threadX : m/threadX + 1;
int blockY = m % threadY == 0 ? m/threadY : m/threadY + 1;
dim3 blocksPerGrid(blockX, blockY, blockZ);
dim3 threadsPerBlock(threadX, threadY, threadZ);
DloopFW<<<blocksPerGrid, threadsPerBlock>>>(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m);
*/
AloopFW_outer(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, m);
}
else{
int mid = n/2;
//DFW (X11, U11, V11)
DFW(x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, mid, m);
//DFW (X12, U11, V12)
DFW(x, x_row_st, x_col_st + mid, u_row_st, u_col_st, v_row_st, v_col_st + mid, mid, m);
//DFW (X21, U21, V11)
DFW(x, x_row_st + mid, x_col_st, u_row_st + mid, u_col_st, v_row_st, v_col_st, mid, m);
//DFW (X22, U21, V12)
DFW(x, x_row_st + mid, x_col_st + mid, u_row_st + mid, u_col_st, v_row_st, v_col_st + mid, mid, m);
//DFW (X11, U12, V21)
DFW(x, x_row_st, x_col_st, u_row_st, u_col_st + mid, v_row_st + mid, v_col_st, mid, m);
//DFW (X12, U12, V22)
DFW(x, x_row_st, x_col_st + mid, u_row_st, u_col_st + mid, v_row_st + mid, v_col_st + mid, mid, m);
//DFW (X21, U22, V21)
DFW(x, x_row_st + mid, x_col_st, u_row_st + mid, u_col_st + mid, v_row_st + mid, v_col_st, mid, m);
//DFW (X22, U22, V22)
DFW(x, x_row_st + mid, x_col_st + mid, u_row_st + mid, u_col_st + mid, v_row_st + mid, v_col_st + mid, mid, m);
}
}
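// The tiled DFW/CFW/BFW variants below only implement their base case (r > n). With the
// tilesize {4, INT_MAX} used in main() the recursion in AFW bottoms out after one level,
// so the printf("Here") branches are effectively unreachable stubs for deeper tiling.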
void DFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
void CFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
void BFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else
printf("Here\n");
}
//Figure 4 implementation : HW 5
void AFW(int ** d_x, int x_row_st, int x_col_st,
int u_row_st, int u_col_st,
int v_row_st, int v_col_st,
int n, int depth, int * tilesize){
int r = tilesize[depth];
if(r > n){
//Execute base case
AloopFW_outer(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
}
else{
int sub_size = n / r;
for(int k = 0; k < r; k++){
int offset = k*sub_size;
AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
//SYNC POINT
cudaDeviceSynchronize();
for(int j = 0; j < r; j++){
if(j == k)
continue;
BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
}
cudaDeviceSynchronize();
for(int i = 0; i < r; i++){
if(i == k)
continue;
for(int j = 0; j < r; j++){
if(j == k)
continue;
DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
}
}
}
}
}//AFW
int ** copy_matrix_to_host(int ** dev_matrix, int n){
int ** new_matrix = new int*[n+1];
for(int i=1;i <= n; i++){
new_matrix[i] = new int[n+1];
int * begin;
cudaMemcpy(&begin, &dev_matrix[i], sizeof (int *), cudaMemcpyDeviceToHost);
cudaMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), cudaMemcpyDeviceToHost);
}
return new_matrix;
}
int ** copy_matrix_to_device(int ** host_matrix, int n){
//int ** dev_matrix = new int*[n+1];
int ** dev_matrix;
cudaError_t err = cudaMalloc(&dev_matrix, (n+1) * sizeof(int *));
if(err != cudaSuccess){
printf("Error allocating memory on device.");
return NULL;
}
for(int i = 1; i <= n; i++){
//printf("%x\n", &addr[i]);
int * start;
err = cudaMalloc(&start, (n+1)*sizeof(int));
if(err != cudaSuccess){
printf("Error allocating memory on device.");
return NULL;
}
cudaMemcpy(dev_matrix+i, &start, sizeof(int *), cudaMemcpyHostToDevice);
cudaMemcpy(start, host_matrix[i], (n+1) * sizeof(int), cudaMemcpyHostToDevice);
}
return dev_matrix;
}
int main(int argc, char * argv[])
{
//Matrix
int n = atoi(argv[1]);
int ** matrix = generate_matrix(n);
int ** dev_matrix = copy_matrix_to_device(matrix, n);
if(dev_matrix == NULL)
return 0;
// fw_iterative_outer(dev_matrix, n);
if(n <= 32){
printf("Original matrix: \n");
print_matrix(matrix, n);
}
long long start, end;
start = clock();
int tilesize[2] = {4, INT_MAX};
AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize);
end = clock();
int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
if(n <= 32){
printf("\nWith updated distances: \n");
print_matrix(new_matrix, n);
}
cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl;
return 0;
}
|
87373a4d4d8ec137ecc9b952d80f07c6c4f2165d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int main(int argc, char **argv)
{
printf("[%s] - Starting...\n", argv[0]);
int gpuid[2] = {1, 2}; // the two GPUs (hard-coded device ids) expected to support P2P
float total_time = 0.0;
bool enable_p2p = true;
if (enable_p2p)
{
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[0], 0));
}
for (int loop = 0; loop < 100; loop++)
{
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(hipSetDevice(gpuid[0]));
float *g0;
checkCudaErrors(hipMalloc(&g0, buf_size));
checkCudaErrors(hipSetDevice(gpuid[1]));
float *g1;
checkCudaErrors(hipMalloc(&g1, buf_size));
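        // Note: the buffers are never initialized -- only transfer bandwidth is measured
        // here, so the payload contents do not matter.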
// Create CUDA event handles
hipEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = hipEventBlockingSync;
checkCudaErrors(hipEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(hipEventCreateWithFlags(&stop_event, eventflags));
// P2P memcopy() benchmark
checkCudaErrors(hipEventRecord(start_event, 0));
for (int i = 0; i < 100; i++)
{
// With UVA we don't need to specify source and target devices, the
// runtime figures this out by itself from the pointers
// Ping-pong copy between GPUs
if (i % 2 == 0)
{
checkCudaErrors(hipMemcpy(g1, g0, buf_size, hipMemcpyDefault));
}
else
{
checkCudaErrors(hipMemcpy(g0, g1, buf_size, hipMemcpyDefault));
}
}
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&time_memcpy, start_event, stop_event));
total_time += time_memcpy;
printf("hipMemcpyPeer / hipMemcpy (%f ms) between GPU%d and GPU%d: %.2fGB/s\n", time_memcpy, gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f);
// Free resources
checkCudaErrors(hipEventDestroy(start_event));
checkCudaErrors(hipEventDestroy(stop_event));
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipFree(g0));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipFree(g1));
}
if (enable_p2p)
{
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Disabling peer access...\n");
checkCudaErrors(hipSetDevice(gpuid[0]));
checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(hipSetDevice(gpuid[1]));
checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[0]));
}
printf("Total time is %.2fs\n", total_time / 1000);
//delete device_handler;
return (EXIT_SUCCESS);
}
| 87373a4d4d8ec137ecc9b952d80f07c6c4f2165d.cu | #include <stdlib.h>
#include <stdio.h>
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
int main(int argc, char **argv)
{
printf("[%s] - Starting...\n", argv[0]);
int gpuid[2] = {1, 2}; // the two GPUs (hard-coded device ids) expected to support P2P
float total_time = 0.0;
bool enable_p2p = true;
if (enable_p2p)
{
// Enable peer access
printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0));
}
for (int loop = 0; loop < 100; loop++)
{
// Allocate buffers
const size_t buf_size = 1024 * 1024 * 16 * sizeof(float);
printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]);
checkCudaErrors(cudaSetDevice(gpuid[0]));
float *g0;
checkCudaErrors(cudaMalloc(&g0, buf_size));
checkCudaErrors(cudaSetDevice(gpuid[1]));
float *g1;
checkCudaErrors(cudaMalloc(&g1, buf_size));
// Create CUDA event handles
cudaEvent_t start_event, stop_event;
float time_memcpy;
int eventflags = cudaEventBlockingSync;
checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));
// P2P memcopy() benchmark
checkCudaErrors(cudaEventRecord(start_event, 0));
for (int i = 0; i < 100; i++)
{
// With UVA we don't need to specify source and target devices, the
// runtime figures this out by itself from the pointers
// Ping-pong copy between GPUs
if (i % 2 == 0)
{
checkCudaErrors(cudaMemcpy(g1, g0, buf_size, cudaMemcpyDefault));
}
else
{
checkCudaErrors(cudaMemcpy(g0, g1, buf_size, cudaMemcpyDefault));
}
}
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));
total_time += time_memcpy;
printf("cudaMemcpyPeer / cudaMemcpy (%f ms) between GPU%d and GPU%d: %.2fGB/s\n", time_memcpy, gpuid[0], gpuid[1],
(1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f);
// Free resources
checkCudaErrors(cudaEventDestroy(start_event));
checkCudaErrors(cudaEventDestroy(stop_event));
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaFree(g0));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaFree(g1));
}
if (enable_p2p)
{
// Disable peer access (also unregisters memory for non-UVA cases)
printf("Disabling peer access...\n");
checkCudaErrors(cudaSetDevice(gpuid[0]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[1]));
checkCudaErrors(cudaSetDevice(gpuid[1]));
checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[0]));
}
printf("Total time is %.2fs\n", total_time / 1000);
//delete device_handler;
return (EXIT_SUCCESS);
}
|
8586016f3406a7e2682d6cb9feed2c26ff0799f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
uint32_t lo, hi;
__kmpc_impl_unpack(val, lo, hi);
hi = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, lo, delta, size);
return __kmpc_impl_pack(lo, hi);
}
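// Full-warp tree reduction: the stride starts at WARPSIZE/2 and halves each round
// (16, 8, 4, 2, 1 when WARPSIZE is 32), so after log2(WARPSIZE) shuffle-down steps
// lane 0 holds the combined value.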
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
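// Irregular variant for a partial warp whose active lanes are contiguous: the live-lane
// count is halved with ceiling division so an odd number of lanes still folds down to
// lane 0 correctly.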
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
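// Reduction for dispersed (non-contiguous) active lanes, as they occur in SIMD regions:
// each lane repeatedly combines with the next higher active lane found via the active
// mask, and the function returns 1 only on the logically first lane.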
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
__kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
__kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
do {
Liveness = __kmpc_impl_activemask();
remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
size = __kmpc_impl_popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
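// IterCnt and Cnt coordinate the bounded global buffer used by the teams reduction:
// teams with id < IterCnt + num_of_records are admitted to write into the buffer, and
// Cnt counts how many teams of the current chunk have already contributed their
// partial result.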
__device__ static volatile uint32_t IterCnt = 0;
__device__ static volatile uint32_t Cnt = 0;
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ unsigned Bound;
__shared__ unsigned ChunkTeamCount;
  // Block progress for teams greater than the current upper
  // limit. We only ever allow a number of teams less than or
  // equal to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
    int ModBlockId = TeamId % num_of_records;
    if (TeamId < num_of_records)
      lgcpyFct(global_buffer, ModBlockId, reduce_data);
    else
      lgredFct(global_buffer, ModBlockId, reduce_data);
__threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
| 8586016f3406a7e2682d6cb9feed2c26ff0799f2.cu | //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
uint32_t lo, hi;
__kmpc_impl_unpack(val, lo, hi);
hi = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, lo, delta, size);
return __kmpc_impl_pack(lo, hi);
}
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
__kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
__kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
do {
Liveness = __kmpc_impl_activemask();
remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
size = __kmpc_impl_popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
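  // The sm_70+ path below decides between the full- and partial-warp reducers from the
  // OpenMP thread count alone, while the pre-Volta path inspects the active-lane mask
  // at run time.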
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
__device__ static volatile uint32_t IterCnt = 0;
__device__ static volatile uint32_t Cnt = 0;
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ unsigned Bound;
__shared__ unsigned ChunkTeamCount;
  // Block progress for teams greater than the current upper
  // limit. We only ever allow a number of teams less than or
  // equal to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
    int ModBlockId = TeamId % num_of_records;
    if (TeamId < num_of_records)
      lgcpyFct(global_buffer, ModBlockId, reduce_data);
    else
      lgredFct(global_buffer, ModBlockId, reduce_data);
__threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
|
10856fb18c6319f77c1b8555ad9f3fd4b366a095.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "v1.h"
#include "utils.cuh"
int main(int argc, char* argv[])
{
printf(CYN "VERSION 2\n" RESET);
if (argc < 5) {
printf("Bad arguments\n");
printf("USAGE ./bin/v1 <noised_image.txt> <patch size> <grid size> <block size> --debug\n");
printf("--debug is optional\n");
exit(-1);
}
FILE* noise_image_file;
if ((noise_image_file = fopen(argv[1], "r")) == NULL) {
printf("Can't open file\n");
exit(-1);
}
int patch_size = atoi(argv[2]);
int block_size = atoi(argv[3]);
int threads = atoi(argv[4]);
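    // argv[2..4] are <patch size>, <grid size> and <block size> from the usage string;
    // note the local names: `block_size` actually holds the grid size. Both values are
    // passed on to non_local_means() through argc/argv and recorded in the results CSV.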
assert(patch_size % 2 == 1);
struct timespec tic;
struct timespec toc;
int m, n;
fscanf(noise_image_file, "%d", &m);
fscanf(noise_image_file, "%d", &n);
printf("Reading image...\n");
float* noise_image_array;
MALLOC(float, noise_image_array, m* n);
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (fscanf(noise_image_file, "%f", &noise_image_array[i * n + j]) != 1)
exit(-1);
}
}
fclose(noise_image_file);
// print_array(noise_image_array, m, n);
float filt_sigma = 0.02;
float patch_sigma = 5.0 / 3.0;
printf("Non-local means filtering...\n");
float* filtered_image_array;
hipMallocManaged(&filtered_image_array, m * n * sizeof(float));
TIC()
non_local_means(filtered_image_array, m, n, noise_image_array, patch_size, filt_sigma, patch_sigma, argc, argv);
TOC("\nTotal time elapsed filtering image: %lf\n")
/* ------------------------------ Save results ------------------------------ */
char* resultsPath = "./results/results.csv";
FILE* fp;
if ((fp = fopen(resultsPath, "a+")) == NULL) {
printf("File does not exist.\nExiting...");
exit(1);
}
fprintf(fp, "%s,%s,%d,%d,%d,%lf\n", argv[0], argv[1], patch_size, block_size, threads, diff_time(tic, toc));
/* ---------------------------------- Other --------------------------------- */
printf("\nWriting output data to file...\n");
FILE* filtered_image_file;
filtered_image_file = fopen("data/filtered_image_v2.txt", "w");
print_array_file(filtered_image_file, filtered_image_array, m, n);
fclose(filtered_image_file);
hipFree(filtered_image_array);
free(noise_image_array);
return 0;
}
| 10856fb18c6319f77c1b8555ad9f3fd4b366a095.cu | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "v1.h"
#include "utils.cuh"
int main(int argc, char* argv[])
{
printf(CYN "VERSION 2\n" RESET);
if (argc < 5) {
printf("Bad arguments\n");
printf("USAGE ./bin/v1 <noised_image.txt> <patch size> <grid size> <block size> --debug\n");
printf("--debug is optional\n");
exit(-1);
}
FILE* noise_image_file;
if ((noise_image_file = fopen(argv[1], "r")) == NULL) {
printf("Can't open file\n");
exit(-1);
}
int patch_size = atoi(argv[2]);
int block_size = atoi(argv[3]);
int threads = atoi(argv[4]);
assert(patch_size % 2 == 1);
struct timespec tic;
struct timespec toc;
int m, n;
fscanf(noise_image_file, "%d", &m);
fscanf(noise_image_file, "%d", &n);
printf("Reading image...\n");
float* noise_image_array;
MALLOC(float, noise_image_array, m* n);
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (fscanf(noise_image_file, "%f", &noise_image_array[i * n + j]) != 1)
exit(-1);
}
}
fclose(noise_image_file);
// print_array(noise_image_array, m, n);
float filt_sigma = 0.02;
float patch_sigma = 5.0 / 3.0;
printf("Non-local means filtering...\n");
float* filtered_image_array;
cudaMallocManaged(&filtered_image_array, m * n * sizeof(float));
TIC()
non_local_means(filtered_image_array, m, n, noise_image_array, patch_size, filt_sigma, patch_sigma, argc, argv);
TOC("\nTotal time elapsed filtering image: %lf\n")
/* ------------------------------ Save results ------------------------------ */
char* resultsPath = "./results/results.csv";
FILE* fp;
if ((fp = fopen(resultsPath, "a+")) == NULL) {
printf("File does not exist.\nExiting...");
exit(1);
}
fprintf(fp, "%s,%s,%d,%d,%d,%lf\n", argv[0], argv[1], patch_size, block_size, threads, diff_time(tic, toc));
/* ---------------------------------- Other --------------------------------- */
printf("\nWriting output data to file...\n");
FILE* filtered_image_file;
filtered_image_file = fopen("data/filtered_image_v2.txt", "w");
print_array_file(filtered_image_file, filtered_image_array, m, n);
fclose(filtered_image_file);
cudaFree(filtered_image_array);
free(noise_image_array);
return 0;
}
|
99bbfdd423add427a42caeec0bf601e221531254.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/timer.cuh"
#include "../include/musket.cuh"
#include "../include/spfb16_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
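// FIR functor: each output sample accumulates `taps` inputs spaced `channels` apart,
// weighted by the 512-entry coefficient array. Note that the generated index
// (Index % (taps*channels)) + j*channels can run past 512 for large j unless
// get_data_local wraps; a per-channel wrap (Index % channels) is presumably intended.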
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels))))
* // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
coeff.get_data_local((Index%(taps*channels)) + (j) * (channels))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
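// Fetch functor: pairs every element with its FFT butterfly partner, whose index
// differs only in bit (log2size - 1 - counter), by XOR-ing that bit into the index.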
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
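// Combine functor: one radix-2 FFT stage. It bit-reverses the upper bits of the index
// to select the twiddle factor exp(-2*pi*i*k/N) and then combines the partner value
// fetched in the previous step with the local value.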
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double fir_time=0.0, fft_time =0.0,allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0, out = 0.0;
timer.Start();
mkt::DArray<float> input(0, 201334784, 201334784, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 201326592, 201326592, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 201326592, 201326592, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 512, 512, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 201334784; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 512; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
int ntaps = 32;
int nchans = 16;
int nspectra = 12582912;
int log2size = 4;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_output, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
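	// Illustrative note: the loop below appears to implement a radix-2, decimation-in-frequency
	// FFT of size 2^log2size (16 here). In stage j, Fetch gathers each element's butterfly
	// partner at index i XOR 2^(log2size-1-j) into c_input_double, and Combine recombines it
	// with the current value of c_output using a twiddle factor exp(-2*pi*i*k/Problemsize)
	// built from a bit-reversed index.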
for(int j = 0; ((j) < (log2size)); j++){
fetch_map_index_in_place_array_functor.counter = (j);fetch_map_index_in_place_array_functor.log2size = (log2size);
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
}
mkt::sync_streams();
timer.Stop();
fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
out += timer.Elapsed();
printf("\n%f;%f;%f;%f;%f;%f\n", fir_time, fft_time, allocation, fill, rest, out);
return EXIT_SUCCESS;
}
| 99bbfdd423add427a42caeec0bf601e221531254.cu | #include <cuda.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <curand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/timer.cuh"
#include "../include/musket.cuh"
#include "../include/spfb16_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels))))
* // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
coeff.get_data_local((Index%(taps*channels)) + (j) * (channels))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double fir_time=0.0, fft_time =0.0,allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0, out = 0.0;
timer.Start();
mkt::DArray<float> input(0, 201334784, 201334784, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 201326592, 201326592, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 201326592, 201326592, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 512, 512, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 201334784; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 512; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
int ntaps = 32;
int nchans = 16;
int nspectra = 12582912;
int log2size = 4;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_output, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
for(int j = 0; ((j) < (log2size)); j++){
fetch_map_index_in_place_array_functor.counter = (j);fetch_map_index_in_place_array_functor.log2size = (log2size);
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
}
mkt::sync_streams();
timer.Stop();
fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
out += timer.Elapsed();
printf("\n%f;%f;%f;%f;%f;%f\n", fir_time, fft_time, allocation, fill, rest, out);
return EXIT_SUCCESS;
}
|
1cad9d641df9b435071b9328ac5a310e6890d207.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <iostream>
#include <metrics/homogeneity_score.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct homogeneityParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<homogeneityParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(truthClusterArray, nElements, true);
raft::allocate(predClusterArray, nElements, true);
raft::update_device(truthClusterArray, &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<raft::mr::device::allocator> allocator(
new raft::mr::device::default_allocator);
//calculating the golden output
double truthMI, truthEntropy;
truthMI = MLCommon::Metrics::mutual_info_score(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthEntropy =
MLCommon::Metrics::entropy(truthClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
if (truthEntropy) {
truthHomogeneity = truthMI / truthEntropy;
} else
truthHomogeneity = 1.0;
if (nElements == 0) truthHomogeneity = 1.0;
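    // Illustrative note: this mirrors the definition homogeneity(truth, pred) =
    // MI(truth, pred) / H(truth), with the convention that the score is 1.0 when the
    // truth labelling has zero entropy or the input is empty (e.g. MI = 0.3 and
    // H = 0.6 give a homogeneity of 0.5).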
//calling the homogeneity CUDA implementation
computedHomogeneity = MLCommon::Metrics::homogeneity_score(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(truthClusterArray));
CUDA_CHECK(hipFree(predClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
homogeneityParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthHomogeneity = 0;
double computedHomogeneity = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<homogeneityParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
//writing the test suite
typedef homogeneityTest<int> homogeneityTestClass;
TEST_P(homogeneityTestClass, Result) {
ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 1cad9d641df9b435071b9328ac5a310e6890d207.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <iostream>
#include <metrics/homogeneity_score.cuh>
#include <raft/mr/device/allocator.hpp>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct homogeneityParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<homogeneityParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(truthClusterArray, nElements, true);
raft::allocate(predClusterArray, nElements, true);
raft::update_device(truthClusterArray, &arr1[0], (int)nElements, stream);
raft::update_device(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<raft::mr::device::allocator> allocator(
new raft::mr::device::default_allocator);
//calculating the golden output
double truthMI, truthEntropy;
truthMI = MLCommon::Metrics::mutual_info_score(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthEntropy =
MLCommon::Metrics::entropy(truthClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
if (truthEntropy) {
truthHomogeneity = truthMI / truthEntropy;
} else
truthHomogeneity = 1.0;
if (nElements == 0) truthHomogeneity = 1.0;
//calling the homogeneity CUDA implementation
computedHomogeneity = MLCommon::Metrics::homogeneity_score(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(truthClusterArray));
CUDA_CHECK(cudaFree(predClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
homogeneityParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthHomogeneity = 0;
double computedHomogeneity = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<homogeneityParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
//writing the test suite
typedef homogeneityTest<int> homogeneityTestClass;
TEST_P(homogeneityTestClass, Result) {
ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
4a23ddb5df4bfbb7b2ce8cf5b2fe54b7f96c1d94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_l2 [8][2];
static int dims_update_halo_kernel1_l2_h [8][2] = {0};
//user function
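// Illustrative note: this kernel refreshes what appears to be the second halo layer of
// the left x-boundary -- for every field flagged in fields[], the halo cell at offset
// (0,0,0) takes the value of the matching interior cell three points away in x, (3,0,0).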
__device__
inline void update_halo_kernel1_l2_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(3,0,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(3,0,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(3,0,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(3,0,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(3,0,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(3,0,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(3,0,0);
}
__global__ void ops_update_halo_kernel1_l2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[0][0] * dims_update_halo_kernel1_l2[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[1][0] * dims_update_halo_kernel1_l2[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[2][0] * dims_update_halo_kernel1_l2[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[3][0] * dims_update_halo_kernel1_l2[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[4][0] * dims_update_halo_kernel1_l2[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[5][0] * dims_update_halo_kernel1_l2[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[6][0] * dims_update_halo_kernel1_l2[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_l2[0][0], dims_update_halo_kernel1_l2[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_l2[1][0], dims_update_halo_kernel1_l2[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_l2[2][0], dims_update_halo_kernel1_l2[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_l2[3][0], dims_update_halo_kernel1_l2[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_l2[4][0], dims_update_halo_kernel1_l2[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_l2[5][0], dims_update_halo_kernel1_l2[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_l2[6][0], dims_update_halo_kernel1_l2[6][1], arg6);
update_halo_kernel1_l2_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_l2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,15)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_l2");
OPS_kernels[15].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_l2_h[0][0] || ydim0 != dims_update_halo_kernel1_l2_h[0][1] || xdim1 != dims_update_halo_kernel1_l2_h[1][0] || ydim1 != dims_update_halo_kernel1_l2_h[1][1] || xdim2 != dims_update_halo_kernel1_l2_h[2][0] || ydim2 != dims_update_halo_kernel1_l2_h[2][1] || xdim3 != dims_update_halo_kernel1_l2_h[3][0] || ydim3 != dims_update_halo_kernel1_l2_h[3][1] || xdim4 != dims_update_halo_kernel1_l2_h[4][0] || ydim4 != dims_update_halo_kernel1_l2_h[4][1] || xdim5 != dims_update_halo_kernel1_l2_h[5][0] || ydim5 != dims_update_halo_kernel1_l2_h[5][1] || xdim6 != dims_update_halo_kernel1_l2_h[6][0] || ydim6 != dims_update_halo_kernel1_l2_h[6][1]) {
dims_update_halo_kernel1_l2_h[0][0] = xdim0;
dims_update_halo_kernel1_l2_h[0][1] = ydim0;
dims_update_halo_kernel1_l2_h[1][0] = xdim1;
dims_update_halo_kernel1_l2_h[1][1] = ydim1;
dims_update_halo_kernel1_l2_h[2][0] = xdim2;
dims_update_halo_kernel1_l2_h[2][1] = ydim2;
dims_update_halo_kernel1_l2_h[3][0] = xdim3;
dims_update_halo_kernel1_l2_h[3][1] = ydim3;
dims_update_halo_kernel1_l2_h[4][0] = xdim4;
dims_update_halo_kernel1_l2_h[4][1] = ydim4;
dims_update_halo_kernel1_l2_h[5][0] = xdim5;
dims_update_halo_kernel1_l2_h[5][1] = ydim5;
dims_update_halo_kernel1_l2_h[6][0] = xdim6;
dims_update_halo_kernel1_l2_h[6][1] = ydim6;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_l2, dims_update_halo_kernel1_l2_h, sizeof(dims_update_halo_kernel1_l2)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_l2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[15].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 15;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 15;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_l2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_l2");
}
ops_enqueue_kernel(desc);
}
#endif
| 4a23ddb5df4bfbb7b2ce8cf5b2fe54b7f96c1d94.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_l2 [8][2];
static int dims_update_halo_kernel1_l2_h [8][2] = {0};
//user function
__device__
inline void update_halo_kernel1_l2_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(3,0,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(3,0,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(3,0,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(3,0,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(3,0,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(3,0,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(3,0,0);
}
__global__ void ops_update_halo_kernel1_l2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[0][0] * dims_update_halo_kernel1_l2[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[1][0] * dims_update_halo_kernel1_l2[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[2][0] * dims_update_halo_kernel1_l2[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[3][0] * dims_update_halo_kernel1_l2[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[4][0] * dims_update_halo_kernel1_l2[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[5][0] * dims_update_halo_kernel1_l2[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_l2[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_l2[6][0] * dims_update_halo_kernel1_l2[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_l2[0][0], dims_update_halo_kernel1_l2[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_l2[1][0], dims_update_halo_kernel1_l2[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_l2[2][0], dims_update_halo_kernel1_l2[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_l2[3][0], dims_update_halo_kernel1_l2[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_l2[4][0], dims_update_halo_kernel1_l2[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_l2[5][0], dims_update_halo_kernel1_l2[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_l2[6][0], dims_update_halo_kernel1_l2[6][1], arg6);
update_halo_kernel1_l2_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_l2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,15)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_l2");
OPS_kernels[15].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_l2_h[0][0] || ydim0 != dims_update_halo_kernel1_l2_h[0][1] || xdim1 != dims_update_halo_kernel1_l2_h[1][0] || ydim1 != dims_update_halo_kernel1_l2_h[1][1] || xdim2 != dims_update_halo_kernel1_l2_h[2][0] || ydim2 != dims_update_halo_kernel1_l2_h[2][1] || xdim3 != dims_update_halo_kernel1_l2_h[3][0] || ydim3 != dims_update_halo_kernel1_l2_h[3][1] || xdim4 != dims_update_halo_kernel1_l2_h[4][0] || ydim4 != dims_update_halo_kernel1_l2_h[4][1] || xdim5 != dims_update_halo_kernel1_l2_h[5][0] || ydim5 != dims_update_halo_kernel1_l2_h[5][1] || xdim6 != dims_update_halo_kernel1_l2_h[6][0] || ydim6 != dims_update_halo_kernel1_l2_h[6][1]) {
dims_update_halo_kernel1_l2_h[0][0] = xdim0;
dims_update_halo_kernel1_l2_h[0][1] = ydim0;
dims_update_halo_kernel1_l2_h[1][0] = xdim1;
dims_update_halo_kernel1_l2_h[1][1] = ydim1;
dims_update_halo_kernel1_l2_h[2][0] = xdim2;
dims_update_halo_kernel1_l2_h[2][1] = ydim2;
dims_update_halo_kernel1_l2_h[3][0] = xdim3;
dims_update_halo_kernel1_l2_h[3][1] = ydim3;
dims_update_halo_kernel1_l2_h[4][0] = xdim4;
dims_update_halo_kernel1_l2_h[4][1] = ydim4;
dims_update_halo_kernel1_l2_h[5][0] = xdim5;
dims_update_halo_kernel1_l2_h[5][1] = ydim5;
dims_update_halo_kernel1_l2_h[6][0] = xdim6;
dims_update_halo_kernel1_l2_h[6][1] = ydim6;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_l2, dims_update_halo_kernel1_l2_h, sizeof(dims_update_halo_kernel1_l2)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_l2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[15].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_l2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 15;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 15;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_l2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_l2");
}
ops_enqueue_kernel(desc);
}
#endif
|
7309f5103ec6ec6092c9743544ca9f3d014d900e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
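  // Illustrative note: advancing by stride(0) + stride(1) steps one row down and one
  // column right at a time, so the strided view below covers exactly the main diagonal
  // of the zeroed result; filling it with 1 produces the eye matrix.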
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.backend() == at::Backend::CUDA);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId());
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
  // for the sake of clarity, because Half in thrust is spotty, and we do not want future changes to be unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
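      // Illustrative example: with n = 4 and random keys [7, 1, 9, 2], the sequence
      // [0, 1, 2, 3] sorts to [1, 3, 0, 2], which becomes the returned permutation;
      // equal keys (possible for integral types) are simply ordered by the sort.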
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give an exact result, but is relatively slow and
// is overkill for most cases, where double precision suffices.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits of precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
  // potential precision loss could occur here when casting int64_t (63 bits of
  // precision) to double (52 bits of precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
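// Worked example (illustrative): resolve_root_int(b=1, cX4=-32, x=4, sign=1) computes
// floor((-1 + sqrt(1 + 32)) / 2) = floor(2.37...) = 2; the binary-search fallback would
// agree, since (1 + 2) * 2 = 6 <= 2x = 8 while (1 + 3) * 3 = 12 > 8.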
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
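// Worked example (illustrative): for a 3x3 tril with offset 0 the trapezoid's first row
// holds f = 1 element, and linear index x = 4 resolves to (row, col) = (2, 1), matching
// the row-major ordering (0,0),(1,0),(1,1),(2,0),(2,1),(2,2).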
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
// >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
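// Hedged usage sketch (function name is illustrative): the public entry
// point that reaches the kernel above is at::tril_indices, so a quick
// end-to-end check is to compare the CUDA result with the CPU implementation
// for a small shape. This assumes the usual ATen helpers (at::device,
// Tensor::cpu, Tensor::equal) visible through the headers included here.
inline bool tril_indices_cuda_matches_cpu_example() {
  auto cuda_idx = at::tril_indices(
      5, 4, /*offset=*/1, at::device(at::kCUDA).dtype(at::kLong));
  auto cpu_idx = at::tril_indices(
      5, 4, /*offset=*/1, at::device(at::kCPU).dtype(at::kLong));
  return cuda_idx.cpu().equal(cpu_idx);
}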
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
| 7309f5103ec6ec6092c9743544ca9f3d014d900e.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.backend() == at::Backend::CUDA);
AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId());
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future changes to be unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits of precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
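// Host-side sketch of the precision issue handled above (helper name is
// illustrative): a double carries only 52 mantissa bits, so for large v the
// rounded std::sqrt result can be off by a few units, and an exact integer
// square root needs a small correction pass, which the binary search above
// generalizes to the full quadratic root. Overflow of (r + 1) * (r + 1) at
// the very top of the int64_t range is ignored here.
inline int64_t host_exact_isqrt_example(int64_t v) {
  int64_t r = (int64_t)std::sqrt((double)v);
  while (r > 0 && r * r > v) --r;          // undo upward rounding
  while ((r + 1) * (r + 1) <= v) ++r;      // undo downward rounding
  return r;                                // largest r with r * r <= v
}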
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
// >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
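// Host-side counterpart for the triu trapezoid (helper name is illustrative),
// mirroring the closed form in the comment above and fixing up any rounding
// so that inequalities [1] and [2] hold exactly.
inline void host_triu_trapezoid_coordinate_example(
    int64_t f, int64_t x, int64_t& row, int64_t& col) {
  double b = 2.0 * (double)f + 1.0;
  row = (int64_t)std::floor((b - std::sqrt(b * b - 8.0 * (double)x)) / 2.0);
  while (row > 0 && (2 * f - row + 1) * row / 2 > x) --row;
  while ((2 * f - row) * (row + 1) / 2 <= x) ++row;
  col = x - (2 * f - row + 1) * row / 2 + row;
}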
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
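// Hedged usage sketch (function name is illustrative): as with the tril
// kernel, the simplest end-to-end check is to compare the CUDA result against
// the CPU implementation through the public at::triu_indices entry point.
inline bool triu_indices_cuda_matches_cpu_example() {
  auto cuda_idx = at::triu_indices(
      5, 4, /*offset=*/-1, at::device(at::kCUDA).dtype(at::kLong));
  auto cpu_idx = at::triu_indices(
      5, 4, /*offset=*/-1, at::device(at::kCPU).dtype(at::kLong));
  return cuda_idx.cpu().equal(cpu_idx);
}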
}} // namespace at::native
|
ccbc62e219661b2daae9ec09cff3dd4ee1231b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace reduction {
template <typename ReductionKernel>
__global__ void kernel_reduce_splitk(typename ReductionKernel::Params params) {
__shared__ typename ReductionKernel::SharedStorage shared_storage;
ReductionKernel reduction_op;
reduction_op(params, shared_storage);
}
template <typename ReductionKernel>
class ReduceSplitKTestbed {
public:
using ElementAccumulator = typename ReductionKernel::ElementAccumulator;
using ElementWorkspace = typename ReductionKernel::ElementWorkspace;
using ElementOutput = typename ReductionKernel::ElementOutput;
using Layout = cutlass::layout::RowMajor;
public:
cutlass::Distribution::Kind distribution_workspace;
cutlass::Distribution::Kind distribution_source;
uint64_t seed;
public:
/// Ctor
ReduceSplitKTestbed(
cutlass::Distribution::Kind distribution_workspace = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind distribution_source = cutlass::Distribution::Uniform,
uint64_t seed = 2019
):
distribution_workspace(distribution_workspace),
distribution_source(distribution_source),
seed(seed) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, 8, -8, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(),
view.capacity());
} else {
// TODO: Implement the rest
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Runs a single problem size
bool run(
cutlass::MatrixCoord problem_size,
int partitions,
ElementAccumulator alpha = 1,
ElementAccumulator beta = 0) {
cutlass::HostTensor<ElementWorkspace, Layout> workspace({
problem_size.row() * partitions,
problem_size.column()
});
cutlass::HostTensor<ElementOutput, Layout> source(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination_reference(problem_size, false);
//
// Initialize
//
initialize_tensor(workspace.host_view(), distribution_workspace, seed);
initialize_tensor(source.host_view(), distribution_source, seed + 23);
cutlass::reference::host::TensorFill(destination.host_view());
workspace.sync_device();
source.sync_device();
destination.sync_device();
//
// Launch reduction kernel
//
dim3 block = ReductionKernel::block_shape();
dim3 grid = ReductionKernel::grid_shape(problem_size);
typename ReductionKernel::Params params(
problem_size,
partitions,
problem_size.row() * problem_size.column(),
workspace.device_ref(),
destination.device_ref(),
source.device_ref(),
{alpha, beta}
);
hipLaunchKernelGGL(( test::reduction::kernel_reduce_splitk<ReductionKernel>), dim3(grid), dim3(block) , 0, 0, params);
hipError_t result = hipDeviceSynchronize();
EXPECT_EQ(result, hipSuccess)
<< "CUDA error: " << hipGetErrorString(result);
destination.sync_host();
//
// Compute reference
//
for (int m = 0; m < problem_size.row(); ++m) {
for (int n = 0; n < problem_size.column(); ++n) {
ElementAccumulator accum = 0;
for (int k = 0; k < partitions; ++k) {
accum += ElementAccumulator(workspace.at({m + k * problem_size.row(), n}));
}
ElementAccumulator c = ElementAccumulator(source.at({m, n}));
destination_reference.at({m, n}) = ElementOutput(accum * alpha + beta * c);
}
}
//
// Compare
//
EXPECT_GT(cutlass::reference::host::TensorNorm(destination.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(destination_reference.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
destination.host_view(), destination_reference.host_view());
EXPECT_TRUE(passed)
<< "Workspace =\n" << workspace.host_view() << "\n\n"
<< "\n"
<< "Reference =\n" << destination_reference.host_view() << "\n\n"
<< "Computed =\n" << destination.host_view() << "\n";
return passed;
}
/// Runs through a variety of test cases
bool run_all() {
cutlass::MatrixCoord problem_sizes[] = {
{8, 8},
{136, 72},
{248, 232},
};
int partition_counts[] = {
1,3,4,5,11
};
bool passed = false;
for (cutlass::MatrixCoord problem : problem_sizes) {
for (int partitions : partition_counts) {
passed = run(problem, partitions);
if (!passed) {
return false;
}
}
}
return passed;
}
};
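// The math the testbed verifies is small enough to spell out as a plain host
// function (illustrative only, not part of the CUTLASS API): a column-wise
// sum over the split-K partitions followed by the linear-combination
// epilogue D = alpha * sum + beta * C, exactly as computed in run() above.
inline void reference_splitk_reduce_example(
    int rows, int cols, int partitions,
    float const *workspace,   // [partitions * rows, cols], row-major
    float const *source,      // [rows, cols], row-major
    float *destination,       // [rows, cols], row-major
    float alpha, float beta) {
  for (int m = 0; m < rows; ++m) {
    for (int n = 0; n < cols; ++n) {
      float accum = 0;
      for (int k = 0; k < partitions; ++k) {
        accum += workspace[(m + k * rows) * cols + n];
      }
      destination[m * cols + n] = alpha * accum + beta * source[m * cols + n];
    }
  }
}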
} // namespace reduction
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Strictly F32 data
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_1_1x32) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 1;
using Shape = cutlass::MatrixShape<1, 32>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_8_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 8;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| ccbc62e219661b2daae9ec09cff3dd4ee1231b55.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide GEMM interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/reduction/kernel/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace reduction {
template <typename ReductionKernel>
__global__ void kernel_reduce_splitk(typename ReductionKernel::Params params) {
__shared__ typename ReductionKernel::SharedStorage shared_storage;
ReductionKernel reduction_op;
reduction_op(params, shared_storage);
}
template <typename ReductionKernel>
class ReduceSplitKTestbed {
public:
using ElementAccumulator = typename ReductionKernel::ElementAccumulator;
using ElementWorkspace = typename ReductionKernel::ElementWorkspace;
using ElementOutput = typename ReductionKernel::ElementOutput;
using Layout = cutlass::layout::RowMajor;
public:
cutlass::Distribution::Kind distribution_workspace;
cutlass::Distribution::Kind distribution_source;
uint64_t seed;
public:
/// Ctor
ReduceSplitKTestbed(
cutlass::Distribution::Kind distribution_workspace = cutlass::Distribution::Uniform,
cutlass::Distribution::Kind distribution_source = cutlass::Distribution::Uniform,
uint64_t seed = 2019
):
distribution_workspace(distribution_workspace),
distribution_source(distribution_source),
seed(seed) {
}
/// Helper to initialize a tensor view
template <typename Element, typename Layout>
bool initialize_tensor(cutlass::TensorView<Element, Layout> view,
cutlass::Distribution::Kind dist_kind,
uint64_t seed) {
if (dist_kind == cutlass::Distribution::Uniform) {
cutlass::reference::host::TensorFillRandomUniform(view, seed, 8, -8, 0);
}
else if (dist_kind == cutlass::Distribution::Gaussian) {
cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, -1);
} else if (dist_kind == cutlass::Distribution::Identity) {
cutlass::reference::host::TensorFillIdentity(view);
} else if (dist_kind == cutlass::Distribution::Sequential) {
cutlass::reference::host::BlockFillSequential(view.data(),
view.capacity());
} else {
// TODO: Implement the rest
EXPECT_TRUE(false) << "Not implemented";
return false;
}
return true;
}
/// Runs a single problem size
bool run(
cutlass::MatrixCoord problem_size,
int partitions,
ElementAccumulator alpha = 1,
ElementAccumulator beta = 0) {
cutlass::HostTensor<ElementWorkspace, Layout> workspace({
problem_size.row() * partitions,
problem_size.column()
});
cutlass::HostTensor<ElementOutput, Layout> source(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination(problem_size);
cutlass::HostTensor<ElementOutput, Layout> destination_reference(problem_size, false);
//
// Initialize
//
initialize_tensor(workspace.host_view(), distribution_workspace, seed);
initialize_tensor(source.host_view(), distribution_source, seed + 23);
cutlass::reference::host::TensorFill(destination.host_view());
workspace.sync_device();
source.sync_device();
destination.sync_device();
//
// Launch reduction kernel
//
dim3 block = ReductionKernel::block_shape();
dim3 grid = ReductionKernel::grid_shape(problem_size);
typename ReductionKernel::Params params(
problem_size,
partitions,
problem_size.row() * problem_size.column(),
workspace.device_ref(),
destination.device_ref(),
source.device_ref(),
{alpha, beta}
);
test::reduction::kernel_reduce_splitk<ReductionKernel><<< grid, block >>>(params);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess)
<< "CUDA error: " << cudaGetErrorString(result);
destination.sync_host();
//
// Compute reference
//
for (int m = 0; m < problem_size.row(); ++m) {
for (int n = 0; n < problem_size.column(); ++n) {
ElementAccumulator accum = 0;
for (int k = 0; k < partitions; ++k) {
accum += ElementAccumulator(workspace.at({m + k * problem_size.row(), n}));
}
ElementAccumulator c = ElementAccumulator(source.at({m, n}));
destination_reference.at({m, n}) = ElementOutput(accum * alpha + beta * c);
}
}
//
// Compare
//
EXPECT_GT(cutlass::reference::host::TensorNorm(destination.host_view()), 0);
EXPECT_GT(cutlass::reference::host::TensorNorm(destination_reference.host_view()), 0);
bool passed = cutlass::reference::host::TensorEquals(
destination.host_view(), destination_reference.host_view());
EXPECT_TRUE(passed)
<< "Workspace =\n" << workspace.host_view() << "\n\n"
<< "\n"
<< "Reference =\n" << destination_reference.host_view() << "\n\n"
<< "Computed =\n" << destination.host_view() << "\n";
return passed;
}
/// Runs through a variety of test cases
bool run_all() {
cutlass::MatrixCoord problem_sizes[] = {
{8, 8},
{136, 72},
{248, 232},
};
int partition_counts[] = {
1,3,4,5,11
};
bool passed = false;
for (cutlass::MatrixCoord problem : problem_sizes) {
for (int partitions : partition_counts) {
passed = run(problem, partitions);
if (!passed) {
return false;
}
}
}
return passed;
}
};
} // namespace reduction
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Strictly F32 data
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_1_1x32) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 1;
using Shape = cutlass::MatrixShape<1, 32>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f32_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = float;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_2_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 2;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Vectorized access
//
TEST(Reduction_ReduceSplitK, f32_f32_f16_8_4x64) {
using ElementWorkspace = float;
using ElementAccumulator = float;
using ElementOutput = cutlass::half_t;
int const kN = 8;
using Shape = cutlass::MatrixShape<4, 64>;
using OutputOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput,
kN,
ElementAccumulator,
ElementAccumulator
>;
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
ElementAccumulator,
ElementWorkspace,
kN
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
Shape,
OutputOp,
ReductionOp
>;
test::reduction::ReduceSplitKTestbed<ReductionKernel> testbed;
EXPECT_TRUE(testbed.run_all());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
553bdc21d634d77ff1d63ed10771cde8e1ac344e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5633 $
// $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_cta.cu
*
* @brief CUDPP CTA-level scan routines
*/
/** \defgroup cudpp_cta CUDPP CTA-Level API
* The CUDPP CTA-Level API contains functions that run on the GPU
* device. These are CUDA \c __device__ functions that are called
* from within other CUDA device functions (typically
* \link cudpp_kernel CUDPP Kernel-Level API\endlink functions).
* They are called CTA-level functions because they typically process
* s_data "owned" by each CTA within shared memory, and are agnostic of
* any other CTAs that may be running (or how many CTAs are running),
* other than to compute appropriate global memory addresses.
* @{
*/
/** @name Scan Functions
* @{
*/
#include <cudpp_globals.h>
#include <cudpp_util.h>
#include <math.h>
#include <cudpp.h>
/**
* @brief Macro to insert necessary __syncthreads() in device emulation mode
*/
#ifdef __DEVICE_EMULATION__
#define __EMUSYNC __syncthreads()
#else
#define __EMUSYNC
#endif
/**
* @brief Template class containing compile-time parameters to the scan functions
*
* ScanTraits is passed as a template parameter to all scan functions. By
* using these compile-time functions we can enable generic code while
* maintaining the highest performance. This is crucial for the performance
* of low-level workhorse algorithms like scan.
*
* @param T The datatype of the scan
* @param oper The ::CUDPPOperator to use for the scan (add, max, etc.)
* @param multiRow True if this is a multi-row scan
* @param unroll True if scan inner loops should be unrolled
* @param sums True if each block should write its sum to the d_blockSums array (false for single-block scans)
* @param backward True if this is a backward scan
* @param fullBlock True if all blocks in this scan are full (CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements)
* @param exclusive True for exclusive scans, false for inclusive scans
*/
template <class T, CUDPPOperator oper, bool backward, bool exclusive,
bool multiRow, bool sums, bool fullBlock>
class ScanTraits
{
public:
//! Returns true if this is a backward scan
static __device__ bool isBackward() { return backward; };
//! Returns true if this is an exclusive scan
static __device__ bool isExclusive() { return exclusive; };
//! Returns true if this is a multi-row scan.
static __device__ bool isMultiRow() { return multiRow; };
//! Returns true if this scan writes the sum of each block to the d_blockSums array (multi-block scans)
static __device__ bool writeSums() { return sums; };
//! Returns true if this is a full scan -- all blocks process CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements
static __device__ bool isFullBlock() { return fullBlock; };
//! The operator function used for the scan
static __device__ T op(const T a, const T b)
{
return Operator<T, oper>::op(a, b);
}
//! The identity value used by the scan
static __device__ T identity() { return Operator<T, oper>::identity(); }
};
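//! Example instantiation (illustrative, not used below): the traits bundle a
//! multi-block, forward, exclusive float add-scan would use when every block
//! is full and per-block sums are written for the second-level scan. The
//! template arguments follow the order <T, oper, backward, exclusive,
//! multiRow, sums, fullBlock> documented above.
typedef ScanTraits<float, CUDPP_ADD, false, true, false, true, true>
    ExampleForwardExclusiveAddScanTraits;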
//! This is used to insert syncthreads to avoid perf loss caused by 128-bit
//! load overlap that happens on G80. This gives about a 15% boost on scans on
//! G80.
//! @todo Parameterize this in case this perf detail changes on future GPUs.
#define DISALLOW_LOADSTORE_OVERLAP 1
/**
* @brief Handles loading input s_data from global memory to shared memory
* (vec4 version)
*
* Load a chunk of 8*blockDim.x elements from global memory into a
* shared memory array. Each thread loads two T4 elements (where
* T4 is, e.g. int4 or float4), computes the scan of those two vec4s in
* thread local arrays (in registers), and writes the two total sums of the
* vec4s into shared memory, where they will be cooperatively scanned with
* the other partial sums by all threads in the CTA.
*
* @param[out] s_out The output (shared) memory array
* @param[out] threadScan0 Intermediate per-thread partial sums array 1
* @param[out] threadScan1 Intermediate per-thread partial sums array 2
* @param[in] d_in The input (device) memory array
* @param[in] numElements The number of elements in the array being scanned
* @param[in] iDataOffset the offset of the input array in global memory for this
* thread block
* @param[out] ai The shared memory address for the thread's first element
* (returned for reuse)
* @param[out] bi The shared memory address for the thread's second element
* (returned for reuse)
* @param[out] aiDev The device memory address for this thread's first element
* (returned for reuse)
* @param[out] biDev The device memory address for this thread's second element
* (returned for reuse)
*/
template <class T, class traits>
__device__ void loadSharedChunkFromMem4(T *s_out,
T threadScan0[4],
T threadScan1[4],
const T *d_in,
int numElements,
int iDataOffset,
int &ai,
int &bi,
int &aiDev,
int &biDev)
{
int thid = threadIdx.x;
aiDev = iDataOffset + thid;
biDev = aiDev + blockDim.x;
// convert to 4-vector
typename typeToVector<T,4>::Result tempData;
typename typeToVector<T,4>::Result* inData = (typename typeToVector<T,4>::Result*)d_in;
ai = thid;
bi = thid + blockDim.x;
// read into tempData;
if (traits::isBackward())
{
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[aiDev];
threadScan0[3] = tempData.w;
threadScan0[2] = traits::op(tempData.z, threadScan0[3]);
threadScan0[1] = traits::op(tempData.y, threadScan0[2]);
threadScan0[0] = s_out[ai]
= traits::op(tempData.x, threadScan0[1]);
}
else
{
threadScan0[3] = traits::identity();
threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[3]);
threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[2]);
threadScan0[0] = s_out[ai]
= traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan0[1]);
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[biDev];
threadScan1[3] = tempData.w;
threadScan1[2] = traits::op(tempData.z, threadScan1[3]);
threadScan1[1] = traits::op(tempData.y, threadScan1[2]);
threadScan1[0] = s_out[bi]
= traits::op(tempData.x, threadScan1[1]);
}
else
{
threadScan1[3] = traits::identity();
threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[3]);
threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[2]);
threadScan1[0] = s_out[bi]
= traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan1[1]);
}
__syncthreads();
// reverse s_data in shared memory
if (ai < CTA_SIZE)
{
unsigned int leftIdx = ai;
unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai;
if (leftIdx < rightIdx)
{
T tmp = s_out[leftIdx];
s_out[leftIdx] = s_out[rightIdx];
s_out[rightIdx] = tmp;
}
}
__syncthreads();
}
else
{
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[aiDev];
threadScan0[0] = tempData.x;
threadScan0[1] = traits::op(tempData.y, threadScan0[0]);
threadScan0[2] = traits::op(tempData.z, threadScan0[1]);
threadScan0[3] = s_out[ai]
= traits::op(tempData.w, threadScan0[2]);
}
else
{
threadScan0[0] = (i < numElements) ? d_in[i] : traits::identity();
threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[0]);
threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[1]);
threadScan0[3] = s_out[ai]
= traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan0[2]);
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[biDev];
threadScan1[0] = tempData.x;
threadScan1[1] = traits::op(tempData.y, threadScan1[0]);
threadScan1[2] = traits::op(tempData.z, threadScan1[1]);
threadScan1[3] = s_out[bi]
= traits::op(tempData.w, threadScan1[2]);
}
else
{
threadScan1[0] = (i < numElements) ? d_in[i] : traits::identity();
threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[0]);
threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[1]);
threadScan1[3] = s_out[bi]
= traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan1[2]);
}
__syncthreads();
}
}
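// For the forward add case, the per-thread work above is just a serial
// inclusive scan of four values, equivalent to this host-side sketch
// (illustrative, helper name not part of CUDPP); out[3] is the vec4 total
// that the CTA-level scan then combines across threads.
template <class T>
inline void hostScan4Example(const T in[4], T out[4])
{
    out[0] = in[0];
    out[1] = in[1] + out[0];
    out[2] = in[2] + out[1];
    out[3] = in[3] + out[2];
}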
/**
* @brief Handles storing result s_data from shared memory to global memory
* (vec4 version)
*
* Store a chunk of SCAN_ELTS_PER_THREAD*blockDim.x elements from shared memory
* into a device memory array. Each thread reads two elements from shared
* memory, adds them to the intermediate sums computed in
* loadSharedChunkFromMem4(), and writes two T4 elements (where
* T4 is, e.g. int4 or float4) to global memory.
*
* @param[out] d_out The output (device) memory array
* @param[in] threadScan0 Intermediate per-thread partial sums array 1
* (contents computed in loadSharedChunkFromMem4())
* @param[in] threadScan1 Intermediate per-thread partial sums array 2
* (contents computed in loadSharedChunkFromMem4())
* @param[in] s_in The input (shared) memory array
* @param[in] numElements The number of elements in the array being scanned
* @param[in] oDataOffset the offset of the output array in global memory
* for this thread block
* @param[in] ai The shared memory address for the thread's first element
* (computed in loadSharedChunkFromMem4())
* @param[in] bi The shared memory address for the thread's second element
* (computed in loadSharedChunkFromMem4())
* @param[in] aiDev The device memory address for this thread's first element
* (computed in loadSharedChunkFromMem4())
* @param[in] biDev The device memory address for this thread's second element
* (computed in loadSharedChunkFromMem4())
*/
template <class T, class traits>
__device__ void storeSharedChunkToMem4(T *d_out,
T threadScan0[4],
T threadScan1[4],
T *s_in,
int numElements,
int oDataOffset,
int ai,
int bi,
int aiDev,
int biDev)
{
// Convert to 4-vector
typename typeToVector<T,4>::Result tempData;
typename typeToVector<T,4>::Result* outData = (typename typeToVector<T,4>::Result*)d_out;
// write results to global memory
if (traits::isBackward())
{
if (ai < CTA_SIZE)
{
unsigned int leftIdx = ai;
unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai;
if (leftIdx < rightIdx)
{
T tmp = s_in[leftIdx];
s_in[leftIdx] = s_in[rightIdx];
s_in[rightIdx] = tmp;
}
}
__syncthreads();
T temp = s_in[ai];
if (traits::isExclusive())
{
tempData.w = temp;
tempData.z = traits::op(temp, threadScan0[3]);
tempData.y = traits::op(temp, threadScan0[2]);
tempData.x = traits::op(temp, threadScan0[1]);
}
else
{
tempData.w = traits::op(temp, threadScan0[3]);
tempData.z = traits::op(temp, threadScan0[2]);
tempData.y = traits::op(temp, threadScan0[1]);
tempData.x = traits::op(temp, threadScan0[0]);
}
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[aiDev] = tempData;
}
else
{
if (i < numElements) { d_out[i] = tempData.x;
if (i+1 < numElements) { d_out[i+1] = tempData.y;
if (i+2 < numElements) { d_out[i+2] = tempData.z; }}}
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
temp = s_in[bi];
if (traits::isExclusive())
{
tempData.w = temp;
tempData.z = traits::op(temp, threadScan1[3]);
tempData.y = traits::op(temp, threadScan1[2]);
tempData.x = traits::op(temp, threadScan1[1]);
}
else
{
tempData.w = traits::op(temp, threadScan1[3]);
tempData.z = traits::op(temp, threadScan1[2]);
tempData.y = traits::op(temp, threadScan1[1]);
tempData.x = traits::op(temp, threadScan1[0]);
}
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[biDev] = tempData;
}
else
{
if (i < numElements) { d_out[i] = tempData.x;
if (i+1 < numElements) { d_out[i+1] = tempData.y;
if (i+2 < numElements) { d_out[i+2] = tempData.z; }}}
}
}
else
{
T temp;
temp = s_in[ai];
if (traits::isExclusive())
{
tempData.x = temp;
tempData.y = traits::op(temp, threadScan0[0]);
tempData.z = traits::op(temp, threadScan0[1]);
tempData.w = traits::op(temp, threadScan0[2]);
}
else
{
tempData.x = traits::op(temp, threadScan0[0]);
tempData.y = traits::op(temp, threadScan0[1]);
tempData.z = traits::op(temp, threadScan0[2]);
tempData.w = traits::op(temp, threadScan0[3]);
}
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[aiDev] = tempData;
}
else
{
// we can't use vec4 because the original array isn't a multiple of
// 4 elements
if ( i < numElements) { d_out[i] = tempData.x;
if ((i+1) < numElements) { d_out[i+1] = tempData.y;
if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } }
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
temp = s_in[bi];
if (traits::isExclusive())
{
tempData.x = temp;
tempData.y = traits::op(temp, threadScan1[0]);
tempData.z = traits::op(temp, threadScan1[1]);
tempData.w = traits::op(temp, threadScan1[2]);
}
else
{
tempData.x = traits::op(temp, threadScan1[0]);
tempData.y = traits::op(temp, threadScan1[1]);
tempData.z = traits::op(temp, threadScan1[2]);
tempData.w = traits::op(temp, threadScan1[3]);
}
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[biDev] = tempData;
}
else
{
// we can't use vec4 because the original array isn't a multiple of
// 4 elements
if ( i < numElements) { d_out[i] = tempData.x;
if ((i+1) < numElements) { d_out[i+1] = tempData.y;
if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } }
}
}
}
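// For the forward add case, assembling each output vec4 above boils down to
// this host-side sketch (illustrative, helper name not part of CUDPP):
// 'prefix' is the CTA-level exclusive result read from shared memory and
// threadScan holds the serial per-thread sums computed at load time.
template <class T>
inline void hostAssembleVec4Example(T prefix, const T threadScan[4],
                                    bool exclusive, T out[4])
{
    if (exclusive)
    {
        out[0] = prefix;
        out[1] = prefix + threadScan[0];
        out[2] = prefix + threadScan[1];
        out[3] = prefix + threadScan[2];
    }
    else
    {
        out[0] = prefix + threadScan[0];
        out[1] = prefix + threadScan[1];
        out[2] = prefix + threadScan[2];
        out[3] = prefix + threadScan[3];
    }
}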
/** @brief Scan all warps of a CTA without synchronization
*
* The warp-scan algorithm breaks a block of data into warp-sized chunks, and
* scans the chunks independently with a warp of threads each. Because warps
* execute instructions in SIMD fashion, there is no need to synchronize in
* order to share data within a warp (only across warps). Also, in SIMD the
* most efficient algorithm is a step-efficient algorithm. Therefore, within
* each warp we use a Hillis-and-Steele-style scan that takes log2(N) steps
* to scan the warp [Daniel Hillis and Guy Steele 1986], rather than the
* work-efficient tree-based algorithm described by Guy Blelloch [1990] that
* takes 2 * log(N) steps and is in general more complex to implement.
* Previous versions of CUDPP used the Blelloch algorithm. For current GPUs,
* the warp size is 32, so this takes five steps per warp.
*
* Each thread is responsible for a single element of the array to be scanned.
* Each thread inputs a single value to the scan via \a val and returns
* its own scanned result element. The threads of each warp cooperate
* via the shared memory array \a s_data to scan WARP_SIZE elements.
*
* Template parameter \a maxlevel allows this warpscan to be performed on
* partial warps. For example, if only the first 8 elements of each warp need
* to be scanned, then warpscan only performs log2(8)=3 steps rather than 5.
*
* The computation uses 2 * WARP_SIZE elements of shared memory per warp to
* enable warps to offset beyond their input data and receive the identity
* element without using any branch instructions.
*
* \note s_data is declared volatile here to prevent the compiler from
* optimizing away writes to shared memory, and ensure correct intrawarp
* communication in the absence of __syncthreads.
*
* @return The result of the warp scan for the current thread
* @param[in] val The current threads's input to the scan
* @param[in,out] s_data A pointer to a temporary shared array of 2*CTA_SIZE
* elements used to compute the warp scans
*/
template<class T, class traits,int maxlevel>
__device__ T warpscan(T val, volatile T* s_data)
{
// The following is the same as 2 * 32 * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE-1))
int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE-1));
s_data[idx] = traits::identity();
idx += WARP_SIZE;
T t = s_data[idx] = val; __EMUSYNC;
// This code is needed because the warp size of device emulation
// is only 1 thread, so sync-less cooperation within a warp doesn't
// work.
#ifdef __DEVICE_EMULATION__
t = s_data[idx - 1]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 2]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 4]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 8]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 16]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
#else
if (0 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 1]); }
if (1 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 2]); }
if (2 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 4]); }
if (3 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 8]); }
if (4 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx -16]); }
#endif
return s_data[idx-1]; // convert inclusive -> exclusive
}
/** @brief Perform a full CTA scan using the warp-scan algorithm
*
* As described in the comment for warpscan(), the warp-scan algorithm breaks
* a block of data into warp-sized chunks, and scans the chunks independently
* with a warp of threads each. To complete the scan, each warp <i>j</i> then
* writes its last element to element <i>j</i> of a temporary shared array.
* Then a single warp exclusive-scans these "warp sums". Finally, each thread
* adds the result of the warp sum scan to the result of the scan from the
* first pass.
*
* Because we scan 2*CTA_SIZE elements per thread, we have to call warpscan
* twice.
*
* @param x The first input value for the current thread
* @param y The second input value for the current thread
* @param s_data Temporary shared memory space of 2*CTA_SIZE elements for
* performing the scan
*/
template <class T, class traits>
__device__ void scanWarps(T x, T y,
T *s_data)
{
T val = warpscan<T, traits, 4>(x, s_data);
__syncthreads();
T val2 = warpscan<T, traits, 4>(y, s_data);
int idx = threadIdx.x;
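// lane 31 of each warp publishes that warp's inclusive total (its exclusive result
// combined with its own input) into the low slots of s_data, so that a single warp
// can scan the per-warp sums below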
if ((idx & 31)==31)
{
s_data[idx >> 5] = traits::op(val, x);
s_data[(idx + blockDim.x) >> 5] = traits::op(val2, y);
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < 32)
#endif
{
s_data[idx] = warpscan<T,traits,(LOG_CTA_SIZE-LOG_WARP_SIZE+1)>(s_data[idx], s_data);
}
__syncthreads();
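// fold each warp's scanned offset (now in the low slots of s_data) back into this
// thread's two partial results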
val = traits::op(val, s_data[idx >> 5]);
val2 = traits::op(val2, s_data[(idx + blockDim.x) >> 5]);
__syncthreads();
s_data[idx] = val;
s_data[idx+blockDim.x] = val2;
}
/**
* @brief CTA-level scan routine; scans s_data in shared memory in each thread block
*
* This function is the main CTA-level scan function. It may be called by other
* CUDA __global__ or __device__ functions. This function scans 2 * CTA_SIZE elements.
* Each thread is responsible for one element in each half of the input array.
* \note This code is intended to be run on a CTA of 128 threads. Other sizes are
* untested.
*
* @param[in] s_data The array to be scanned in shared memory
* @param[out] d_blockSums Array of per-block sums
* @param[in] blockSumIndex Location in \a d_blockSums to which to write this block's sum
*/
template <class T, class traits>
__device__ void scanCTA(T *s_data,
T *d_blockSums,
unsigned int blockSumIndex)
{
T val = s_data[threadIdx.x];
T val2 = s_data[threadIdx.x + blockDim.x];
__syncthreads();
scanWarps<T,traits>(val, val2, s_data);
__syncthreads();
if (traits::writeSums() && threadIdx.x == blockDim.x - 1)
{
d_blockSums[blockSumIndex] = traits::op(val2, s_data[threadIdx.x + blockDim.x]);
}
#ifdef __DEVICE_EMULATION__
// must sync in emulation mode when doing backward scans, because otherwise the
// shared memory array will get reversed before the block sums are read!
if (traits::isBackward())
__syncthreads();
#endif
}
/** @} */ // end scan functions
/** @} */ // end cudpp_cta
| 553bdc21d634d77ff1d63ed10771cde8e1ac344e.cu | // -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5633 $
// $Date: 2009-07-01 15:02:51 +1000 (Wed, 01 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_cta.cu
*
* @brief CUDPP CTA-level scan routines
*/
/** \defgroup cudpp_cta CUDPP CTA-Level API
* The CUDPP CTA-Level API contains functions that run on the GPU
* device. These are CUDA \c __device__ functions that are called
* from within other CUDA device functions (typically
* \link cudpp_kernel CUDPP Kernel-Level API\endlink functions).
* They are called CTA-level functions because they typically process
* s_data "owned" by each CTA within shared memory, and are agnostic of
* any other CTAs that may be running (or how many CTAs are running),
* other than to compute appropriate global memory addresses.
* @{
*/
/** @name Scan Functions
* @{
*/
#include <cudpp_globals.h>
#include <cudpp_util.h>
#include <math.h>
#include <cudpp.h>
/**
* @brief Macro to insert necessary __syncthreads() in device emulation mode
*/
#ifdef __DEVICE_EMULATION__
#define __EMUSYNC __syncthreads()
#else
#define __EMUSYNC
#endif
/**
* @brief Template class containing compile-time parameters to the scan functions
*
* ScanTraits is passed as a template parameter to all scan functions. By
* using these compile-time functions we can enable generic code while
* maintaining the highest performance. This is crucial for the performance
* of low-level workhorse algorithms like scan.
*
* @param T The datatype of the scan
* @param oper The ::CUDPPOperator to use for the scan (add, max, etc.)
* @param multiRow True if this is a multi-row scan
* @param unroll True if scan inner loops should be unrolled
* @param sums True if each block should write it's sum to the d_blockSums array (false for single-block scans)
* @param backward True if this is a backward scan
* @param fullBlock True if all blocks in this scan are full (CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements)
* @param exclusive True for exclusive scans, false for inclusive scans
*/
template <class T, CUDPPOperator oper, bool backward, bool exclusive,
bool multiRow, bool sums, bool fullBlock>
class ScanTraits
{
public:
//! Returns true if this is a backward scan
static __device__ bool isBackward() { return backward; };
//! Returns true if this is an exclusive scan
static __device__ bool isExclusive() { return exclusive; };
//! Returns true if this a multi-row scan.
static __device__ bool isMultiRow() { return multiRow; };
//! Returns true if this scan writes the sum of each block to the d_blockSums array (multi-block scans)
static __device__ bool writeSums() { return sums; };
//! Returns true if this is a full scan -- all blocks process CTA_SIZE * SCAN_ELEMENTS_PER_THREAD elements
static __device__ bool isFullBlock() { return fullBlock; };
//! The operator function used for the scan
static __device__ T op(const T a, const T b)
{
return Operator<T, oper>::op(a, b);
}
//! The identity value used by the scan
static __device__ T identity() { return Operator<T, oper>::identity(); }
};
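// Illustrative instantiation (parameter order: T, oper, backward, exclusive, multiRow,
// sums, fullBlock; assumes the CUDPP_ADD operator from cudpp.h): a forward, inclusive,
// single-row add-scan over full blocks that also records per-block sums would be
//   ScanTraits<float, CUDPP_ADD, false, false, false, true, true>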
//! This is used to insert syncthreads to avoid perf loss caused by 128-bit
//! load overlap that happens on G80. This gives about a 15% boost on scans on
//! G80.
//! @todo Parameterize this in case this perf detail changes on future GPUs.
#define DISALLOW_LOADSTORE_OVERLAP 1
/**
* @brief Handles loading input s_data from global memory to shared memory
* (vec4 version)
*
* Load a chunk of 8*blockDim.x elements from global memory into a
* shared memory array. Each thread loads two T4 elements (where
* T4 is, e.g. int4 or float4), computes the scan of those two vec4s in
* thread local arrays (in registers), and writes the two total sums of the
* vec4s into shared memory, where they will be cooperatively scanned with
* the other partial sums by all threads in the CTA.
*
* @param[out] s_out The output (shared) memory array
* @param[out] threadScan0 Intermediate per-thread partial sums array 1
* @param[out] threadScan1 Intermediate per-thread partial sums array 2
* @param[in] d_in The input (device) memory array
* @param[in] numElements The number of elements in the array being scanned
* @param[in] iDataOffset the offset of the input array in global memory for this
* thread block
* @param[out] ai The shared memory address for the thread's first element
* (returned for reuse)
* @param[out] bi The shared memory address for the thread's second element
* (returned for reuse)
* @param[out] aiDev The device memory address for this thread's first element
* (returned for reuse)
* @param[out] biDev The device memory address for this thread's second element
* (returned for reuse)
*/
template <class T, class traits>
__device__ void loadSharedChunkFromMem4(T *s_out,
T threadScan0[4],
T threadScan1[4],
const T *d_in,
int numElements,
int iDataOffset,
int &ai,
int &bi,
int &aiDev,
int &biDev)
{
int thid = threadIdx.x;
aiDev = iDataOffset + thid;
biDev = aiDev + blockDim.x;
// convert to 4-vector
typename typeToVector<T,4>::Result tempData;
typename typeToVector<T,4>::Result* inData = (typename typeToVector<T,4>::Result*)d_in;
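// inData aliases d_in as a vec4 pointer so each thread issues a single 128-bit load per chunk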
ai = thid;
bi = thid + blockDim.x;
// read into tempData;
if (traits::isBackward())
{
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[aiDev];
threadScan0[3] = tempData.w;
threadScan0[2] = traits::op(tempData.z, threadScan0[3]);
threadScan0[1] = traits::op(tempData.y, threadScan0[2]);
threadScan0[0] = s_out[ai]
= traits::op(tempData.x, threadScan0[1]);
}
else
{
threadScan0[3] = traits::identity();
threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[3]);
threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[2]);
threadScan0[0] = s_out[ai]
= traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan0[1]);
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[biDev];
threadScan1[3] = tempData.w;
threadScan1[2] = traits::op(tempData.z, threadScan1[3]);
threadScan1[1] = traits::op(tempData.y, threadScan1[2]);
threadScan1[0] = s_out[bi]
= traits::op(tempData.x, threadScan1[1]);
}
else
{
threadScan1[3] = traits::identity();
threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[3]);
threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[2]);
threadScan1[0] = s_out[bi]
= traits::op((i < numElements) ? d_in[i] : traits::identity(), threadScan1[1]);
}
__syncthreads();
// reverse s_data in shared memory
if (ai < CTA_SIZE)
{
unsigned int leftIdx = ai;
unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai;
if (leftIdx < rightIdx)
{
T tmp = s_out[leftIdx];
s_out[leftIdx] = s_out[rightIdx];
s_out[rightIdx] = tmp;
}
}
__syncthreads();
}
else
{
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[aiDev];
threadScan0[0] = tempData.x;
threadScan0[1] = traits::op(tempData.y, threadScan0[0]);
threadScan0[2] = traits::op(tempData.z, threadScan0[1]);
threadScan0[3] = s_out[ai]
= traits::op(tempData.w, threadScan0[2]);
}
else
{
threadScan0[0] = (i < numElements) ? d_in[i] : traits::identity();
threadScan0[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan0[0]);
threadScan0[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan0[1]);
threadScan0[3] = s_out[ai]
= traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan0[2]);
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
tempData = inData[biDev];
threadScan1[0] = tempData.x;
threadScan1[1] = traits::op(tempData.y, threadScan1[0]);
threadScan1[2] = traits::op(tempData.z, threadScan1[1]);
threadScan1[3] = s_out[bi]
= traits::op(tempData.w, threadScan1[2]);
}
else
{
threadScan1[0] = (i < numElements) ? d_in[i] : traits::identity();
threadScan1[1] = traits::op(((i+1) < numElements) ? d_in[i+1] : traits::identity(), threadScan1[0]);
threadScan1[2] = traits::op(((i+2) < numElements) ? d_in[i+2] : traits::identity(), threadScan1[1]);
threadScan1[3] = s_out[bi]
= traits::op(((i+3) < numElements) ? d_in[i+3] : traits::identity(), threadScan1[2]);
}
__syncthreads();
}
}
/**
* @brief Handles storing result s_data from shared memory to global memory
* (vec4 version)
*
* Store a chunk of SCAN_ELTS_PER_THREAD*blockDim.x elements from shared memory
* into a device memory array. Each thread stores reads two elements from shared
* memory, adds them to the intermediate sums computed in
* loadSharedChunkFromMem4(), and writes two T4 elements (where
* T4 is, e.g. int4 or float4) to global memory.
*
* @param[out] d_out The output (device) memory array
* @param[in] threadScan0 Intermediate per-thread partial sums array 1
* (contents computed in loadSharedChunkFromMem4())
* @param[in] threadScan1 Intermediate per-thread partial sums array 2
* (contents computed in loadSharedChunkFromMem4())
* @param[in] s_in The input (shared) memory array
* @param[in] numElements The number of elements in the array being scanned
* @param[in] oDataOffset the offset of the output array in global memory
* for this thread block
* @param[in] ai The shared memory address for the thread's first element
* (computed in loadSharedChunkFromMem4())
* @param[in] bi The shared memory address for the thread's second element
* (computed in loadSharedChunkFromMem4())
* @param[in] aiDev The device memory address for this thread's first element
* (computed in loadSharedChunkFromMem4())
* @param[in] biDev The device memory address for this thread's second element
* (computed in loadSharedChunkFromMem4())
*/
template <class T, class traits>
__device__ void storeSharedChunkToMem4(T *d_out,
T threadScan0[4],
T threadScan1[4],
T *s_in,
int numElements,
int oDataOffset,
int ai,
int bi,
int aiDev,
int biDev)
{
// Convert to 4-vector
typename typeToVector<T,4>::Result tempData;
typename typeToVector<T,4>::Result* outData = (typename typeToVector<T,4>::Result*)d_out;
// write results to global memory
if (traits::isBackward())
{
if (ai < CTA_SIZE)
{
unsigned int leftIdx = ai;
unsigned int rightIdx = (2 * CTA_SIZE - 1) - ai;
if (leftIdx < rightIdx)
{
T tmp = s_in[leftIdx];
s_in[leftIdx] = s_in[rightIdx];
s_in[rightIdx] = tmp;
}
}
__syncthreads();
T temp = s_in[ai];
if (traits::isExclusive())
{
tempData.w = temp;
tempData.z = traits::op(temp, threadScan0[3]);
tempData.y = traits::op(temp, threadScan0[2]);
tempData.x = traits::op(temp, threadScan0[1]);
}
else
{
tempData.w = traits::op(temp, threadScan0[3]);
tempData.z = traits::op(temp, threadScan0[2]);
tempData.y = traits::op(temp, threadScan0[1]);
tempData.x = traits::op(temp, threadScan0[0]);
}
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[aiDev] = tempData;
}
else
{
if (i < numElements) { d_out[i] = tempData.x;
if (i+1 < numElements) { d_out[i+1] = tempData.y;
if (i+2 < numElements) { d_out[i+2] = tempData.z; }}}
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
temp = s_in[bi];
if (traits::isExclusive())
{
tempData.w = temp;
tempData.z = traits::op(temp, threadScan1[3]);
tempData.y = traits::op(temp, threadScan1[2]);
tempData.x = traits::op(temp, threadScan1[1]);
}
else
{
tempData.w = traits::op(temp, threadScan1[3]);
tempData.z = traits::op(temp, threadScan1[2]);
tempData.y = traits::op(temp, threadScan1[1]);
tempData.x = traits::op(temp, threadScan1[0]);
}
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[biDev] = tempData;
}
else
{
if (i < numElements) { d_out[i] = tempData.x;
if (i+1 < numElements) { d_out[i+1] = tempData.y;
if (i+2 < numElements) { d_out[i+2] = tempData.z; }}}
}
}
else
{
T temp;
temp = s_in[ai];
if (traits::isExclusive())
{
tempData.x = temp;
tempData.y = traits::op(temp, threadScan0[0]);
tempData.z = traits::op(temp, threadScan0[1]);
tempData.w = traits::op(temp, threadScan0[2]);
}
else
{
tempData.x = traits::op(temp, threadScan0[0]);
tempData.y = traits::op(temp, threadScan0[1]);
tempData.z = traits::op(temp, threadScan0[2]);
tempData.w = traits::op(temp, threadScan0[3]);
}
int i = aiDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[aiDev] = tempData;
}
else
{
// we can't use vec4 because the original array isn't a multiple of
// 4 elements
if ( i < numElements) { d_out[i] = tempData.x;
if ((i+1) < numElements) { d_out[i+1] = tempData.y;
if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } }
}
#ifdef DISALLOW_LOADSTORE_OVERLAP
__syncthreads();
#endif
temp = s_in[bi];
if (traits::isExclusive())
{
tempData.x = temp;
tempData.y = traits::op(temp, threadScan1[0]);
tempData.z = traits::op(temp, threadScan1[1]);
tempData.w = traits::op(temp, threadScan1[2]);
}
else
{
tempData.x = traits::op(temp, threadScan1[0]);
tempData.y = traits::op(temp, threadScan1[1]);
tempData.z = traits::op(temp, threadScan1[2]);
tempData.w = traits::op(temp, threadScan1[3]);
}
i = biDev * 4;
if (traits::isFullBlock() || i + 3 < numElements)
{
outData[biDev] = tempData;
}
else
{
// we can't use vec4 because the original array isn't a multiple of
// 4 elements
if ( i < numElements) { d_out[i] = tempData.x;
if ((i+1) < numElements) { d_out[i+1] = tempData.y;
if ((i+2) < numElements) { d_out[i+2] = tempData.z; } } }
}
}
}
/** @brief Scan all warps of a CTA without synchronization
*
* The warp-scan algorithm breaks a block of data into warp-sized chunks, and
* scans the chunks independently with a warp of threads each. Because warps
* execute instructions in SIMD fashion, there is no need to synchronize in
* order to share data within a warp (only across warps). Also, in SIMD the
* most efficient algorithm is a step-efficient algorithm. Therefore, within
* each warp we use a Hillis-and-Steele-style scan that takes log2(N) steps
* to scan the warp [Daniel Hillis and Guy Steele 1986], rather than the
* work-efficient tree-based algorithm described by Guy Blelloch [1990] that
* takes 2 * log(N) steps and is in general more complex to implement.
* Previous versions of CUDPP used the Blelloch algorithm. For current GPUs,
* the warp size is 32, so this takes five steps per warp.
*
* Each thread is responsible for a single element of the array to be scanned.
* Each thread inputs a single value to the scan via \a val and returns
* its own scanned result element. The threads of each warp cooperate
* via the shared memory array \a s_data to scan WARP_SIZE elements.
*
* Template parameter \a maxlevel allows this warpscan to be performed on
* partial warps. For example, if only the first 8 elements of each warp need
* to be scanned, then warpscan only performs log2(8)=3 steps rather than 5.
*
* The computation uses 2 * WARP_SIZE elements of shared memory per warp to
* enable warps to offset beyond their input data and receive the identity
* element without using any branch instructions.
*
* \note s_data is declared volatile here to prevent the compiler from
* optimizing away writes to shared memory, and ensure correct intrawarp
* communication in the absence of __syncthreads.
*
* @return The result of the warp scan for the current thread
* @param[in] val The current threads's input to the scan
* @param[in,out] s_data A pointer to a temporary shared array of 2*CTA_SIZE
* elements used to compute the warp scans
*/
template<class T, class traits,int maxlevel>
__device__ T warpscan(T val, volatile T* s_data)
{
// The following is the same as 2 * 32 * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (WARP_SIZE-1))
int idx = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE-1));
s_data[idx] = traits::identity();
idx += WARP_SIZE;
T t = s_data[idx] = val; __EMUSYNC;
// This code is needed because the warp size of device emulation
// is only 1 thread, so sync-less cooperation within a warp doesn't
// work.
#ifdef __DEVICE_EMULATION__
t = s_data[idx - 1]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 2]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 4]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 8]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
t = s_data[idx - 16]; __EMUSYNC;
s_data[idx] = traits::op(s_data[idx],t); __EMUSYNC;
#else
if (0 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 1]); }
if (1 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 2]); }
if (2 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 4]); }
if (3 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx - 8]); }
if (4 <= maxlevel) { s_data[idx] = t = traits::op(t, s_data[idx -16]); }
#endif
return s_data[idx-1]; // convert inclusive -> exclusive
}
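// Sequential sketch of the same Hillis-Steele step pattern (illustrative only, assuming
// WARP_SIZE == 32; it produces the inclusive result that the -1 offset above converts
// to an exclusive one):
//   for (int d = 1; d < 32; d <<= 1)
//       for (int i = 31; i >= d; --i) w[i] = op(w[i], w[i - d]);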
/** @brief Perform a full CTA scan using the warp-scan algorithm
*
* As described in the comment for warpscan(), the warp-scan algorithm breaks
* a block of data into warp-sized chunks, and scans the chunks independently
* with a warp of threads each. To complete the scan, each warp <i>j</i> then
* writes its last element to element <i>j</i> of a temporary shared array.
* Then a single warp exclusive-scans these "warp sums". Finally, each thread
* adds the result of the warp sum scan to the result of the scan from the
* first pass.
*
* Because we scan 2*CTA_SIZE elements per thread, we have to call warpscan
* twice.
*
* @param x The first input value for the current thread
* @param y The second input value for the current thread
* @param s_data Temporary shared memory space of 2*CTA_SIZE elements for
* performing the scan
*/
template <class T, class traits>
__device__ void scanWarps(T x, T y,
T *s_data)
{
T val = warpscan<T, traits, 4>(x, s_data);
__syncthreads();
T val2 = warpscan<T, traits, 4>(y, s_data);
int idx = threadIdx.x;
if ((idx & 31)==31)
{
s_data[idx >> 5] = traits::op(val, x);
s_data[(idx + blockDim.x) >> 5] = traits::op(val2, y);
}
__syncthreads();
#ifndef __DEVICE_EMULATION__
if (idx < 32)
#endif
{
s_data[idx] = warpscan<T,traits,(LOG_CTA_SIZE-LOG_WARP_SIZE+1)>(s_data[idx], s_data);
}
__syncthreads();
val = traits::op(val, s_data[idx >> 5]);
val2 = traits::op(val2, s_data[(idx + blockDim.x) >> 5]);
__syncthreads();
s_data[idx] = val;
s_data[idx+blockDim.x] = val2;
}
/**
* @brief CTA-level scan routine; scans s_data in shared memory in each thread block
*
* This function is the main CTA-level scan function. It may be called by other
* CUDA __global__ or __device__ functions. This function scans 2 * CTA_SIZE elements.
* Each thread is responsible for one element in each half of the input array.
* \note This code is intended to be run on a CTA of 128 threads. Other sizes are
* untested.
*
* @param[in] s_data The array to be scanned in shared memory
* @param[out] d_blockSums Array of per-block sums
* @param[in] blockSumIndex Location in \a d_blockSums to which to write this block's sum
*/
template <class T, class traits>
__device__ void scanCTA(T *s_data,
T *d_blockSums,
unsigned int blockSumIndex)
{
T val = s_data[threadIdx.x];
T val2 = s_data[threadIdx.x + blockDim.x];
__syncthreads();
scanWarps<T,traits>(val, val2, s_data);
__syncthreads();
if (traits::writeSums() && threadIdx.x == blockDim.x - 1)
{
d_blockSums[blockSumIndex] = traits::op(val2, s_data[threadIdx.x + blockDim.x]);
}
#ifdef __DEVICE_EMULATION__
// must sync in emulation mode when doing backward scans, because otherwise the
// shared memory array will get reversed before the block sums are read!
if (traits::isBackward())
__syncthreads();
#endif
}
/** @} */ // end scan functions
/** @} */ // end cudpp_cta
|
d88755195efa8138aea0e81de66413858a875be4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "apply_step_function.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(apply_step_function, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(apply_step_function, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(apply_step_function, dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d88755195efa8138aea0e81de66413858a875be4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "apply_step_function.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
apply_step_function<<<gridBlock,threadBlock>>>(input,output,N);
cudaDeviceSynchronize();
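// the next 10 launches are untimed warm-up; only the 1000-launch loop below is timed.
// note that no cudaDeviceSynchronize() follows the timed loop, so the measured interval
// mostly reflects kernel launch/queueing cost rather than completed execution time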
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
apply_step_function<<<gridBlock,threadBlock>>>(input,output,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
apply_step_function<<<gridBlock,threadBlock>>>(input,output,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
045007776db80d75ba3106e801b8d9c1798b745f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "helper_cuda.h"
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
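// split the packed 32-bit word into its four bytes and bin each one; the tag parameter
// is kept for interface compatibility but is unused now that addByte relies on a
// hardware shared-memory atomicAdd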
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(hipMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(hipFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
assert(byteCount % sizeof(uint) == 0);
hipLaunchKernelGGL(( histogram256Kernel), dim3(PARTIAL_HISTOGRAM256_COUNT), dim3(HISTOGRAM256_THREADBLOCK_SIZE), 0, 0,
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
hipLaunchKernelGGL(( mergeHistogram256Kernel), dim3(HISTOGRAM256_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, 0,
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
}
#define clip(minv, maxv, value) ((value)<minv) ? minv : (((value)>maxv) ? maxv : (value))
__global__ void kernel_rgbHist(
unsigned char *dst,const unsigned char *src, uint *d_Histogram,
int width, int height)
{
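// d_Histogram is used here as a 256-entry lookup table (typically a cumulative /
// equalization table prepared on the host): each RGB channel value is remapped
// through it and clamped to [0,255]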
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x >= width || y >= height)
return;
int offset = y*width*3 + 3*x;
dst[offset] = clip(0,255,d_Histogram[src[offset]]);
dst[offset+1] = clip(0,255,d_Histogram[src[offset+1]]);
dst[offset+2] = clip(0,255,d_Histogram[src[offset+2]]);
}
extern "C" int rgbHist_(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
dim3 block((width+31)/32,(height+31)/32);
dim3 thread(32, 32);
hipLaunchKernelGGL(kernel_rgbHist, dim3(block), dim3(thread), 0, 0, dst, src, d_Histogram, width, height);
return 0;
}
__global__ void kernel_grayHist(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x >= width || y >= height)
return;
int offset = y*width + x;
dst[offset] = clip(0,255,d_Histogram[src[offset]]);
}
extern "C" int grayHist_(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
dim3 block((width+31)/32,(height+31)/32);
dim3 thread(32, 32);
hipLaunchKernelGGL(kernel_grayHist, dim3(block), dim3(thread), 0, 0, dst, src, d_Histogram, width, height);
return 0;
}
| 045007776db80d75ba3106e801b8d9c1798b745f.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "helper_cuda.h"
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
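// each warp owns a private 256-bin sub-histogram in shared memory, which cuts atomic
// contention before the per-block merge at the end of the kernel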
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
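// tree reduction in shared memory: the active stride halves each iteration until
// data[0] holds the total for this bin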
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(cudaMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(cudaFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
assert(byteCount % sizeof(uint) == 0);
histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
}
#define clip(minv, maxv, value) ((value)<minv) ? minv : (((value)>maxv) ? maxv : (value))
__global__ void kernel_rgbHist(
unsigned char *dst,const unsigned char *src, uint *d_Histogram,
int width, int height)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x >= width || y >= height)
return;
int offset = y*width*3 + 3*x;
dst[offset] = clip(0,255,d_Histogram[src[offset]]);
dst[offset+1] = clip(0,255,d_Histogram[src[offset+1]]);
dst[offset+2] = clip(0,255,d_Histogram[src[offset+2]]);
}
extern "C" int rgbHist_(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
dim3 block((width+31)/32,(height+31)/32);
dim3 thread(32, 32);
kernel_rgbHist<<<block, thread>>>(dst, src, d_Histogram, width, height);
return 0;
}
__global__ void kernel_grayHist(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if(x >= width || y >= height)
return;
int offset = y*width + x;
dst[offset] = clip(0,255,d_Histogram[src[offset]]);
}
extern "C" int grayHist_(
unsigned char *dst,const unsigned char *src,uint *d_Histogram,
int width, int height)
{
dim3 block((width+31)/32,(height+31)/32);
dim3 thread(32, 32);
kernel_grayHist<<<block, thread>>>( dst, src, d_Histogram, width, height);
return 0;
}
|
b0267f81a43af9525db0c3f7258db0eb1549b04d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
zswapdblk_kernel( int nb,
magmaDoubleComplex *dA, int ldda, int inca,
magmaDoubleComplex *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
magmaDoubleComplex tmp;
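// thread tx owns row tx of the nb-by-nb diagonal block assigned to this thread block
// and walks across its nb columns, swapping elements between dA and dB one at a time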
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
zswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_zswapdblk(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( zswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
| b0267f81a43af9525db0c3f7258db0eb1549b04d.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> s d c
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
zswapdblk_kernel( int nb,
magmaDoubleComplex *dA, int ldda, int inca,
magmaDoubleComplex *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
magmaDoubleComplex tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
zswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX_16 array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX_16 array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_zswapdblk(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
zswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
|
b0742c0d6f2211990a2a854e5f40f8b2277900f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
__global__ void update_array_gpu_tiled(int mode, int i, cellType *d_array)
{
int myBlockId = (blockIdx.x) + 1;
int r,c;
//generate hanging points (r,c)
if (mode == 1)
{
r = 1 + (i-1) * TILE_ROWS - ( (myBlockId - 1) * TILE_ROWS);
c = (myBlockId - 1) * TILE_COLS + dependencyWidthLeft;
}
else
{
int midPointIteration = nCols / TILE_COLS;
r = (1 + (midPointIteration - 1) * TILE_ROWS) - ((myBlockId - 1) * TILE_ROWS);
c = ((midPointIteration - i) * (TILE_COLS)) + ((myBlockId - 1)* TILE_COLS) + dependencyWidthLeft ;
}
//generate my location and process the block
// myCol is the column assigned to thread x of a given block
int myCol = c + threadIdx.x - dependencyWidthLeft;
__shared__ cellType sharedArray [TILE_COLS + dependencyWidthLeft];
for (int iter = 1; iter <= TILE_ROWS; ++iter)
{
int myRow = r + (iter-1);
sharedArray[threadIdx.x] = d_array(myRow-1, myCol);
__syncthreads();
if (threadIdx.x >= dependencyWidthLeft)
{
int a = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x].value1) / (sharedArray[threadIdx.x].value2 + 3) ;
int b = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x - 1].value1) / (sharedArray[threadIdx.x - 1].value2 + 3);
int c = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x - 2].value1) + (sharedArray[threadIdx.x - 2].value2 + 3);
if ((a >= b) && (a >=c))
{
d_array(myRow, myCol).value1 = a;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x].value2 + 3;
}
else
{
if ((b >= a) && (b >=c))
{
d_array(myRow, myCol).value1 = b;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x - 1].value2 + 3;
}
else
{
d_array(myRow, myCol).value1 = c;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x - 2].value2 + 3;
}
}
}
__syncthreads();
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row arguments (cellType *h_array, int rowNumber, int mode, int value)
// : mode =1 for random initialization, put any value in that case
initialize_this_row(h_array, 0, 1, -1);
//Create array at device
cellType *d_array;
hipMalloc((void**) &d_array, sizeof(cellType)*((nRows) * TOTAL_COLS));
//copy host array to device arrray, if needed
copy_host_to_device(h_array, d_array);
GpuTimer phase1;
phase1.Start();
//create a wrapper to design tiling iterations
int ThreadsPerBlock = dependencyWidthLeft + TILE_COLS;
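// wavefront over tile anti-diagonals: the first loop launches i independent tile blocks
// per iteration as the front grows, and the second loop shrinks it back down past the midpoint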
for (int i = 1; i <= (nRows/TILE_ROWS); i++)
{
//number of blocks in the ith iteration will be equal to i
hipLaunchKernelGGL(( update_array_gpu_tiled), dim3(dim3(i,1,1)), dim3(dim3(ThreadsPerBlock,1,1)), 0, 0, 1, i, d_array);
}
for (int i = (nRows/TILE_ROWS)-1; i >= 1; i--)
{
//number of blocks in the ith iteration will be equal to i
hipLaunchKernelGGL(( update_array_gpu_tiled), dim3(dim3(i,1,1)), dim3(dim3(ThreadsPerBlock,1,1)), 0, 0, 2, i, d_array);
}
phase1.Stop();
cout <<"Time (Tiled GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
//ofstream myfile ("files_output/o_gpu_tiled_shmem.txt");
//write_array_file(h_array, myfile);
return 0;
}
| b0742c0d6f2211990a2a854e5f40f8b2277900f1.cu |
#include "headers/myHeaders.h"
#include "headers/myUtilityFunctions.h"
using namespace std;
__global__ void update_array_gpu_tiled(int mode, int i, cellType *d_array)
{
int myBlockId = (blockIdx.x) + 1;
int r,c;
//generate hanging points (r,c)
if (mode == 1)
{
r = 1 + (i-1) * TILE_ROWS - ( (myBlockId - 1) * TILE_ROWS);
c = (myBlockId - 1) * TILE_COLS + dependencyWidthLeft;
}
else
{
int midPointIteration = nCols / TILE_COLS;
r = (1 + (midPointIteration - 1) * TILE_ROWS) - ((myBlockId - 1) * TILE_ROWS);
c = ((midPointIteration - i) * (TILE_COLS)) + ((myBlockId - 1)* TILE_COLS) + dependencyWidthLeft ;
}
//generate my location and process the block
// myCol is the column assigned to thread x of a given block
int myCol = c + threadIdx.x - dependencyWidthLeft;
__shared__ cellType sharedArray [TILE_COLS + dependencyWidthLeft];
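// each iteration stages the row above the one being computed in shared memory; the extra
// dependencyWidthLeft entries act as a left halo so a thread can read positions
// threadIdx.x-1 and threadIdx.x-2 without leaving the tile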
for (int iter = 1; iter <= TILE_ROWS; ++iter)
{
int myRow = r + (iter-1);
sharedArray[threadIdx.x] = d_array(myRow-1, myCol);
__syncthreads();
if (threadIdx.x >= dependencyWidthLeft)
{
int a = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x].value1) / (sharedArray[threadIdx.x].value2 + 3) ;
int b = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x - 1].value1) / (sharedArray[threadIdx.x - 1].value2 + 3);
int c = (d_array(myRow, myCol).value1 + sharedArray[threadIdx.x - 2].value1) + (sharedArray[threadIdx.x - 2].value2 + 3);
if ((a >= b) && (a >=c))
{
d_array(myRow, myCol).value1 = a;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x].value2 + 3;
}
else
{
if ((b >= a) && (b >=c))
{
d_array(myRow, myCol).value1 = b;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x - 1].value2 + 3;
}
else
{
d_array(myRow, myCol).value1 = c;
d_array(myRow, myCol).value2 = sharedArray[threadIdx.x - 2].value2 + 3;
}
}
}
__syncthreads();
}
}
int main(int argc, char const *argv[])
{
//create array at host : initialize accordingly
cellType *h_array;
h_array = create_array_host();
//initialize base row arguments (cellType *h_array, int rowNumber, int mode, int value)
// : mode =1 for random initialization, put any value in that case
initialize_this_row(h_array, 0, 1, -1);
//Create array at device
cellType *d_array;
cudaMalloc((void**) &d_array, sizeof(cellType)*((nRows) * TOTAL_COLS));
//copy host array to device arrray, if needed
copy_host_to_device(h_array, d_array);
GpuTimer phase1;
phase1.Start();
//create a wrapper to design tiling iterations
int ThreadsPerBlock = dependencyWidthLeft + TILE_COLS;
for (int i = 1; i <= (nRows/TILE_ROWS); i++)
{
//number of blocks in the ith iteration will be equal to i
update_array_gpu_tiled<<<dim3(i,1,1), dim3(ThreadsPerBlock,1,1)>>>(1, i, d_array);
}
for (int i = (nRows/TILE_ROWS)-1; i >= 1; i--)
{
//number of blocks in the ith iteration will be equal to i
update_array_gpu_tiled<<<dim3(i,1,1), dim3(ThreadsPerBlock,1,1)>>>(2, i, d_array);
}
phase1.Stop();
cout <<"Time (Tiled GPU): " <<phase1.Elapsed()<< " Milli Seconds\n";
//copy back to cpu
copy_device_to_host(h_array, d_array);
//Access the resultant matrix : dump into output file
//write_array_console(h_array);
//ofstream myfile ("files_output/o_gpu_tiled_shmem.txt");
//write_array_file(h_array, myfile);
return 0;
}
|
e2f59d94f59d3bab84ae199f7e76f2626a991fe2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 28.11.2018
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__global__ void bitonicSortStepKernelValue(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
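// this variant orders elements by the y ("value") array and carries x along with each
// swap; bitonicSortStepKernelKey below is the mirror image that compares on x instead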
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
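// XOR with the step distance j gives this thread's compare-exchange partner; only the
// lower-indexed member of each pair performs the swap, and bit k of i picks the
// direction (ascending when (i & k) == 0)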
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, yShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, yShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (y[posI]>y[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (y[posI]<y[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__global__ void bitonicSortStepKernelKey(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (x[posI]>x[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (x[posI]<x[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void bitonicSortStepKernel(void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<T*>(vx);
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (x[posI]>x[posIXJ])) {
/* exchange(i,ixj); */
T temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (x[posI]<x[posIXJ])) {
/* exchange(i,ixj); */
T temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void bitonicSortStepGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {
hipLaunchKernelGGL(( bitonicSortStepKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, j, k, length, descending);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__host__ void bitonicSortStepGenericKey(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
hipLaunchKernelGGL(( bitonicSortStepKernelKey<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__host__ void bitonicSortStepGenericValue(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
hipLaunchKernelGGL(( bitonicSortStepKernelValue<X,Y>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGeneric, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericKey, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericValue, (dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
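//////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original file): the step kernels above
// are meant to be driven by the classic bitonic outer loops over k (size of the
// bitonic sub-sequences) and j (compare-exchange distance). A hypothetical
// host-side driver could look like the following; launchDims, stream and the
// buffer arguments are assumed to be prepared by the caller exactly as for
// bitonicSortStepGeneric, and length is assumed to be a power of two (the usual
// bitonic requirement).
template<typename T>
static void bitonicSortFullSketch(dim3 &launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int length, bool descending) {
    for (int k = 2; k <= length; k <<= 1) {        // sorted sub-sequence size doubles each pass
        for (int j = k >> 1; j > 0; j >>= 1) {     // compare-exchange distance halves within a pass
            bitonicSortStepGeneric<T>(launchDims, stream, vx, xShapeInfo, j, k, length, descending);
        }
    }
}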
| e2f59d94f59d3bab84ae199f7e76f2626a991fe2.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma, created on 28.11.2018
//
#include <ops/specials_cuda.h>
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__global__ void bitonicSortStepKernelValue(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, yShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, yShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (y[posI]>y[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (y[posI]<y[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__global__ void bitonicSortStepKernelKey(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<X*>(vx);
auto y = static_cast<Y*>(vy);
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (x[posI]>x[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (x[posI]<x[posIXJ])) {
/* exchange(i,ixj); */
X temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
Y ytemp = y[posI];
y[posI] = y[posIXJ];
y[posIXJ] = ytemp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ void bitonicSortStepKernel(void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {
auto x = static_cast<T*>(vx);
unsigned int i, ixj; /* Sorting partners: i and ixj */
i = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ Nd4jLong xLength;
if (threadIdx.x == 0)
xLength = shape::length(xShapeInfo);
__syncthreads();
if (i >= length)
return;
ixj = i^j;
/* The threads with the lowest ids sort the array. */
if ((ixj)>i) {
int posI = shape::getIndexOffset(i, xShapeInfo, xLength);
int posIXJ = shape::getIndexOffset(ixj, xShapeInfo, xLength);
if ((i&k)==0) {
/* Sort ascending */
if (!descending == (x[posI]>x[posIXJ])) {
/* exchange(i,ixj); */
T temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
}
} else if ((i&k)!=0) {
/* Sort descending */
if (!descending == (x[posI]<x[posIXJ])) {
/* exchange(i,ixj); */
T temp = x[posI];
x[posI] = x[posIXJ];
x[posIXJ] = temp;
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ void bitonicSortStepGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending) {
bitonicSortStepKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, j, k, length, descending);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__host__ void bitonicSortStepGenericKey(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
bitonicSortStepKernelKey<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
__host__ void bitonicSortStepGenericValue(dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending) {
bitonicSortStepKernelValue<X,Y><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, j, k, length, descending);
}
BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGeneric, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericKey, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
BUILD_DOUBLE_TEMPLATE(template void ND4J_EXPORT bitonicSortStepGenericValue, (dim3 &launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, int j, int k, int length, bool descending), LIBND4J_TYPES, LIBND4J_TYPES);
|
d6a74f9af3bf3c0bd9d9e824b92f5b4b6aa1a5b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH/THH.h>
#include <THH/THHTensorMath.h>
#include <THH/THHGeneral.h>
#include <THH/THHBlas.h>
#include <THH/THHTensorCopy.h>
#include <THH/THHTensorRandom.h>
#include <TH/THHalf.h>
#include <THH/THHApply.cuh>
#include <THH/THHReduce.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHAtomics.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
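// Illustrative sketch (not part of the original THC source): a hypothetical
// helper showing how a host-side caller might choose between the two kernel
// flavours above. The real dispatch logic lives in the generic file included at
// the bottom of this translation unit; the helper name and the threshold of 16
// indices are assumptions for illustration only.
static inline bool preferSmallIndexKernel(int64_t numIndices) {
  // Few selected indices: load each index once and stream its whole slice
  // (the *SmallIndex kernels). Many indices: stride over every (index, element)
  // pair instead (the *LargeIndex kernels) to expose more parallelism.
  return numIndices <= 16;
}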
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
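  // This op relies on the (index, value) pairs having been sorted by index
  // beforehand, so that duplicate indices sit next to each other: only the
  // element at the start of each run of equal indices walks forward and
  // accumulates the whole run, which keeps the accumulation deterministic
  // without needing atomics.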
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
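  // Prefer 32-bit index arithmetic whenever every offset fits below INT_MAX;
  // 32-bit integer math is cheaper on the GPU, so the 64-bit path is only
  // taken for very large tensors.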
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include <THH/generic/THHTensorIndex.hip>
#include <THH/THHGenerateAllTypes.h>
| d6a74f9af3bf3c0bd9d9e824b92f5b4b6aa1a5b0.cu | #include <THC/THC.h>
#include <THC/THCTensorMath.h>
#include <THC/THCGeneral.h>
#include <THC/THCBlas.h>
#include <THC/THCTensorCopy.h>
#include <THC/THCTensorRandom.h>
#include <TH/THHalf.h>
#include <THC/THCApply.cuh>
#include <THC/THCReduce.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCAtomics.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include <THC/generic/THCTensorIndex.cu>
#include <THC/THCGenerateAllTypes.h>
|
cc6c41db0fcae718f69e3e4064c16ff27a99d5c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
 * RMS error. It is then used as a base to generate and evaluate 8 new estimates,
 * which are steps in different directions in m-c space. The best estimate is
 * then used as the base for another iteration of "generate and evaluate". This
 * continues until none of the new estimates are better than the base. This is
 * a gradient search for a minimum in m-c space.
 *
 * To compile:
 *   nvcc -o linearregcuda linearreg_cuda.cu -lm
 *
 * To run:
 *   ./linearregcuda
*
*
*****************************************************************************/
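/*
 * Minimal sketch of the search loop described above (not part of the original
 * program, which evaluates the error on the GPU further down): from the base
 * estimate (bm, bc), generate the 8 neighbouring estimates, move to the best
 * one while it improves the RMS error, and stop otherwise. rms_error() is
 * forward-declared here only so the sketch is self-contained; it is defined
 * later in this file.
 */
double rms_error(double m, double c);
static void gradient_search_sketch(double *bm, double *bc, double step) {
  double om[] = {0, 1, 1, 1, 0, -1, -1, -1};   /* offsets in m */
  double oc[] = {1, 1, 0, -1, -1, -1, 0, 1};   /* offsets in c */
  double be = rms_error(*bm, *bc);
  int minimum_found = 0;
  while (!minimum_found) {
    double best_e = be, best_m = *bm, best_c = *bc;
    for (int i = 0; i < 8; i++) {              /* evaluate the 8 neighbours of the base */
      double m = *bm + om[i] * step;
      double c = *bc + oc[i] * step;
      double e = rms_error(m, c);
      if (e < best_e) { best_e = e; best_m = m; best_c = c; }
    }
    if (best_e < be) {                         /* best neighbour becomes the new base */
      be = best_e; *bm = best_m; *bc = best_c;
    } else {
      minimum_found = 1;                       /* no neighbour improved: stop */
    }
  }
}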
typedef struct point_t{
double x;
double y;
}point_t;
int n_data = 1000;
__device__ int d_n_data =1000;
point_t data[] = {
{83.93,138.55},{72.52,122.82},{65.75,119.88},{72.44,139.81},
{65.08,112.81},{82.02,126.86},{73.57,124.36},{89.27,157.88},
{65.60,103.79},{82.17,120.94},{77.33,137.19},{85.58,142.59},
{74.61,134.50},{65.89,108.69},{72.11,132.32},{73.60,121.80},
{68.02,103.77},{13.40,59.66},{ 0.31,32.08},{53.48,96.97},
{45.68,81.56},{98.36,165.62},{46.28,94.74},{45.52,113.26},
{22.58,51.72},{72.86,126.99},{19.59,46.60},{87.54,128.13},
{16.09,51.26},{69.70,122.62},{33.05,86.34},{63.33,110.83},
{93.93,149.98},{ 0.92,27.15},{26.38,71.82},{57.05,98.26},
{16.21,61.96},{31.67,69.41},{13.43,59.64},{ 8.46,49.44},
{95.89,150.25},{95.43,167.23},{69.53,127.36},{34.46,77.43},
{60.35,107.34},{ 0.50,35.08},{90.66,157.80},{16.78,64.26},
{69.74,134.40},{ 0.25,41.74},{53.23,93.57},{80.17,135.71},
{ 6.54,33.28},{34.11,87.78},{ 2.86,19.95},{32.56,69.15},
{28.28,62.91},{33.17,90.85},{ 7.62,46.39},{68.16,115.15},
{92.35,147.88},{21.23,65.80},{47.14,85.59},{87.77,119.15},
{76.12,137.95},{37.49,74.92},{39.29,80.00},{44.71,90.70},
{42.10,104.10},{28.34,54.37},{50.45,85.02},{56.47,92.47},
{35.22,75.02},{88.57,166.80},{ 6.07,38.95},{63.48,118.90},
{76.77,142.52},{18.75,74.67},{28.78,74.43},{43.84,100.43},
{ 0.16,26.37},{29.21,65.85},{62.72,133.16},{39.25,66.23},
{85.05,147.58},{73.38,113.21},{46.34,82.59},{68.40,119.47},
{11.44,56.74},{19.95,65.39},{79.29,137.38},{79.80,142.68},
{52.15,112.49},{56.03,115.50},{67.16,115.66},{85.33,129.00},
{34.29,70.72},{97.74,151.99},{68.56,104.55},{49.97,71.59},
{46.38,71.61},{89.20,157.85},{54.90,98.79},{ 6.74,47.96},
{19.67,77.69},{55.14,108.36},{33.86,81.18},{10.20,43.16},
{42.03,89.50},{80.17,147.56},{85.07,142.96},{41.73,99.97},
{20.68,63.65},{73.40,144.82},{49.25,109.76},{27.89,69.11},
{80.33,146.00},{21.38,49.85},{62.95,118.61},{ 1.44,27.12},
{94.38,153.81},{83.01,152.04},{59.41,106.51},{95.76,149.08},
{ 5.90,30.95},{ 3.18,29.72},{11.16,48.38},{76.40,143.36},
{68.93,136.55},{62.07,107.83},{80.17,132.10},{58.48,106.66},
{69.13,107.67},{81.88,142.73},{81.70,137.70},{40.60,88.39},
{66.65,119.79},{22.91,68.87},{38.13,66.58},{69.03,130.53},
{90.24,155.14},{46.52,86.41},{14.99,56.92},{75.56,115.17},
{18.09,56.09},{30.52,78.03},{93.37,165.99},{ 4.26,31.92},
{30.94,81.34},{ 8.43,64.08},{42.79,99.67},{70.30,122.75},
{47.69,85.80},{64.77,104.36},{29.11,65.52},{46.10,87.98},
{12.27,52.14},{24.33,60.38},{ 0.52,37.89},{14.99,38.50},
{98.12,174.95},{26.33,62.86},{61.20,107.48},{ 0.21,50.65},
{85.41,153.44},{23.29,72.59},{44.86,98.04},{13.70,33.42},
{44.89,87.27},{69.07,121.71},{75.31,125.88},{70.98,124.66},
{74.63,129.20},{57.62,99.19},{ 4.93,47.07},{ 6.99,45.55},
{32.47,72.47},{61.36,108.31},{66.23,117.36},{74.85,147.03},
{27.07,61.46},{41.34,90.76},{90.58,162.86},{25.15,58.14},
{81.85,127.93},{56.80,107.64},{ 8.75,40.27},{13.11,51.97},
{47.54,99.42},{55.85,121.85},{91.99,149.87},{35.93,83.03},
{86.70,150.77},{57.26,104.50},{83.18,137.18},{62.85,111.72},
{66.38,100.51},{38.15,64.34},{78.34,130.14},{29.41,63.87},
{30.56,80.40},{24.52,64.02},{45.21,105.62},{ 6.72,39.72},
{11.67,56.33},{19.46,49.66},{40.88,93.53},{25.62,66.98},
{ 7.75,44.96},{39.61,73.33},{31.31,75.18},{31.02,67.79},
{ 0.84,26.65},{73.96,121.34},{56.48,94.16},{ 7.42,53.64},
{97.33,153.93},{76.93,139.36},{85.29,155.48},{14.61,36.13},
{60.27,98.15},{30.24,63.36},{96.60,167.11},{10.60,48.26},
{75.55,127.98},{47.79,99.14},{68.76,139.37},{49.20,85.70},
{70.12,126.18},{ 9.64,55.50},{65.00,110.46},{ 5.80,27.32},
{58.13,107.53},{80.65,125.69},{57.07,113.42},{ 2.46,36.25},
{34.52,74.94},{89.00,160.87},{13.12,47.48},{75.78,137.21},
{13.07,58.64},{91.07,156.47},{16.42,42.15},{44.37,90.97},
{16.50,57.42},{34.72,71.51},{ 2.49,35.96},{68.34,118.72},
{ 2.28,20.73},{52.53,104.94},{19.33,57.68},{46.63,87.49},
{ 4.04,51.35},{36.31,73.48},{75.46,121.14},{23.63,51.63},
{41.66,79.94},{94.64,150.47},{77.09,145.92},{87.76,142.38},
{ 5.90,31.17},{82.27,145.91},{81.11,132.03},{63.29,110.70},
{92.95,155.73},{82.20,138.97},{93.85,160.15},{43.27,86.28},
{32.35,81.14},{47.96,81.16},{50.27,112.29},{12.64,56.54},
{50.56,101.93},{28.53,76.30},{41.70,92.57},{ 2.14,38.56},
{47.53,90.55},{83.69,145.56},{63.20,114.75},{23.49,60.50},
{45.48,66.42},{ 1.44,47.74},{ 6.53,39.36},{35.93,81.55},
{39.04,71.04},{72.88,98.95},{17.30,57.22},{42.46,89.02},
{65.29,130.02},{43.40,81.51},{70.75,126.68},{85.97,140.38},
{71.37,116.03},{57.68,102.87},{48.73,109.81},{29.54,61.81},
{12.99,36.72},{58.51,100.83},{55.06,115.84},{41.27,81.68},
{95.46,139.08},{40.07,90.85},{64.05,117.87},{42.29,91.06},
{90.98,144.57},{29.16,68.37},{ 7.83,42.00},{48.06,86.27},
{94.48,157.82},{89.99,149.87},{35.20,71.98},{32.22,56.62},
{47.09,98.59},{85.54,127.16},{36.72,82.94},{36.01,75.92},
{ 6.90,29.41},{27.79,61.96},{90.41,151.25},{24.80,58.27},
{ 3.15,27.08},{41.65,78.81},{ 5.68,53.98},{ 1.34,37.18},
{26.34,60.24},{81.94,138.61},{61.67,106.42},{95.19,140.32},
{85.58,136.69},{43.53,78.15},{80.18,125.00},{97.18,162.47},
{72.37,144.27},{89.73,162.32},{41.43,77.31},{42.75,95.42},
{68.74,113.34},{15.64,34.64},{ 9.24,45.07},{37.89,102.87},
{10.48,59.05},{48.86,98.02},{ 5.63,36.69},{72.08,102.79},
{92.33,149.56},{16.73,45.24},{46.36,90.80},{24.83,68.06},
{80.48,150.63},{20.98,64.45},{80.63,143.90},{96.88,150.88},
{14.64,43.47},{42.90,102.30},{30.41,75.91},{46.39,119.63},
{76.53,131.83},{94.93,154.39},{28.47,66.64},{34.51,70.97},
{25.38,60.99},{88.59,121.79},{35.45,59.41},{73.00,131.11},
{24.03,33.96},{56.43,106.18},{ 3.47,57.03},{56.44,100.79},
{64.50,120.89},{76.78,127.02},{10.71,50.09},{31.62,71.31},
{67.89,135.90},{18.74,58.15},{25.99,64.30},{64.99,116.96},
{ 4.05,30.48},{73.02,134.72},{80.37,123.35},{14.33,51.49},
{83.51,133.53},{85.37,132.10},{21.45,63.42},{12.50,46.08},
{19.10,56.52},{14.77,50.66},{ 6.19,40.86},{79.33,138.71},
{67.03,135.44},{87.19,137.83},{18.56,64.58},{43.95,85.47},
{ 2.68,45.61},{19.80,69.46},{63.85,122.97},{32.66,74.13},
{22.52,54.51},{69.10,113.45},{96.13,152.68},{78.26,116.72},
{75.35,150.20},{40.42,65.87},{66.04,117.46},{61.18,125.02},
{91.26,158.82},{28.78,78.74},{83.82,149.69},{37.04,92.22},
{59.71,132.27},{86.71,155.44},{44.21,105.01},{64.64,124.00},
{81.08,126.04},{90.40,143.03},{28.99,63.60},{10.61,43.34},
{ 6.09,35.67},{86.80,146.80},{ 4.74,40.51},{22.16,75.61},
{95.42,160.87},{42.31,97.11},{79.45,127.72},{67.70,133.21},
{44.97,81.59},{62.14,106.48},{49.11,89.51},{23.15,49.57},
{69.43,117.25},{32.52,84.45},{38.18,88.52},{20.37,57.94},
{17.90,58.25},{87.61,149.04},{70.18,124.13},{59.29,111.64},
{81.08,119.48},{27.53,62.26},{75.90,147.53},{49.54,91.74},
{96.57,143.68},{26.88,62.99},{33.53,74.65},{13.78,60.12},
{77.20,136.76},{36.36,57.36},{69.37,121.64},{43.13,94.04},
{44.73,85.18},{59.19,93.87},{92.70,159.41},{57.89,110.56},
{ 4.97,43.11},{30.12,80.07},{67.29,115.70},{56.82,93.89},
{79.96,145.69},{ 0.24,32.68},{36.35,81.48},{53.27,96.14},
{58.14,119.24},{23.07,72.66},{21.82,85.44},{85.62,136.62},
{39.98,90.70},{66.47,132.12},{ 5.88,40.04},{16.93,54.79},
{88.44,147.54},{27.78,71.00},{17.21,45.88},{27.28,55.77},
{31.21,69.12},{53.56,102.01},{61.31,108.07},{53.82,89.92},
{59.13,107.76},{84.00,133.95},{48.37,91.23},{26.28,81.03},
{42.29,82.81},{49.67,106.51},{43.70,101.04},{80.43,129.00},
{ 1.48,19.30},{64.68,127.26},{71.79,142.68},{95.74,161.99},
{84.81,156.18},{65.74,124.08},{11.29,54.95},{69.57,113.88},
{67.90,111.80},{83.08,150.54},{24.01,75.08},{20.45,64.25},
{24.65,83.77},{ 4.36,28.30},{63.47,113.95},{50.57,116.85},
{58.74,91.76},{62.64,108.21},{80.94,144.01},{53.31,94.00},
{ 9.96,59.07},{86.10,141.11},{72.80,137.84},{64.29,106.70},
{87.24,148.45},{40.74,85.65},{ 7.67,57.36},{96.32,159.90},
{51.56,89.97},{ 6.07,38.84},{57.09,122.71},{57.50,109.01},
{55.54,122.09},{88.57,151.62},{ 2.29,33.75},{54.25,99.13},
{91.02,172.53},{37.02,65.41},{40.93,101.27},{35.47,84.82},
{81.22,138.00},{84.63,130.37},{45.65,88.35},{94.18,164.19},
{87.71,155.91},{ 9.19,48.52},{94.96,136.92},{71.01,110.69},
{68.79,128.29},{40.52,92.34},{ 8.73,47.66},{ 2.06,44.89},
{97.18,147.83},{34.26,60.40},{14.18,58.46},{52.38,117.49},
{14.48,62.38},{ 6.10,52.04},{81.05,130.25},{ 3.48,35.73},
{73.78,144.59},{10.79,42.76},{49.91,109.72},{79.61,129.90},
{27.31,72.39},{87.00,132.94},{36.59,100.57},{61.76,108.79},
{38.06,71.50},{64.91,136.85},{20.14,76.01},{45.45,94.73},
{61.91,97.69},{42.14,95.23},{76.44,140.26},{ 5.81,46.83},
{37.34,100.85},{87.30,150.03},{ 1.51,37.72},{92.90,134.24},
{95.33,136.51},{73.18,124.70},{98.83,150.63},{11.76,68.35},
{80.05,127.30},{68.51,126.88},{57.05,126.94},{83.51,147.29},
{87.99,139.28},{22.63,61.08},{61.78,110.76},{ 0.77,40.86},
{29.04,91.19},{22.65,62.93},{57.23,104.92},{51.11,118.38},
{55.48,118.49},{80.82,142.60},{94.31,142.94},{73.06,123.22},
{60.08,127.92},{20.41,42.15},{ 2.02,35.70},{26.47,62.19},
{78.80,131.93},{53.90,95.36},{87.28,153.11},{18.51,60.06},
{94.25,165.01},{77.61,124.99},{43.87,78.29},{81.91,129.31},
{49.34,93.72},{65.69,120.60},{82.54,135.66},{89.34,143.95},
{59.15,115.08},{16.27,42.15},{86.27,148.26},{40.22,84.98},
{56.84,113.68},{46.54,98.73},{69.43,129.22},{96.92,157.81},
{19.39,61.83},{98.25,174.25},{74.10,127.68},{31.81,79.36},
{10.55,34.88},{45.75,81.79},{69.75,120.53},{18.79,67.94},
{70.13,117.97},{19.16,60.13},{96.15,175.37},{88.82,174.16},
{22.49,63.91},{53.78,112.52},{87.44,135.97},{16.95,54.24},
{ 4.69,41.55},{86.09,155.43},{74.00,138.94},{56.21,101.64},
{22.33,73.11},{84.20,134.62},{39.29,88.79},{17.79,44.85},
{ 5.19,24.15},{ 0.42,24.56},{ 6.39,36.61},{28.84,75.02},
{ 7.40,27.75},{26.10,83.45},{88.61,152.71},{11.08,60.03},
{68.59,134.87},{93.87,158.37},{50.86,101.59},{24.78,59.72},
{ 3.28,42.54},{53.86,111.14},{76.42,149.08},{91.46,143.44},
{32.48,68.59},{64.87,118.38},{67.13,113.34},{83.72,123.72},
{50.74,82.96},{64.54,112.22},{68.64,112.28},{78.30,124.84},
{33.29,87.20},{96.31,149.51},{92.26,150.48},{46.82,101.00},
{56.04,95.14},{24.15,64.23},{53.74,105.95},{80.23,147.26},
{90.44,145.88},{25.85,50.78},{76.17,134.98},{30.77,89.23},
{76.70,141.78},{80.85,140.80},{31.00,80.52},{ 6.18,47.46},
{58.01,120.82},{92.60,153.36},{ 1.42,19.57},{32.41,72.42},
{69.28,133.70},{ 9.76,43.95},{91.50,160.92},{46.78,92.37},
{32.49,99.91},{67.14,122.63},{12.76,42.72},{72.32,137.30},
{84.35,130.13},{ 7.77,48.91},{51.27,89.46},{55.28,112.12},
{67.97,133.39},{31.13,55.41},{35.17,83.88},{69.97,125.33},
{32.83,67.16},{79.67,136.73},{44.47,105.74},{25.49,77.04},
{70.12,129.90},{26.96,65.71},{17.51,37.12},{90.31,162.05},
{ 4.09,41.32},{87.08,160.68},{72.63,134.35},{44.82,92.09},
{92.11,168.35},{21.54,52.57},{41.01,89.99},{63.90,109.83},
{88.17,154.35},{43.26,103.70},{73.85,153.34},{17.56,54.53},
{70.13,141.42},{81.79,130.61},{32.92,65.19},{13.86,56.05},
{65.98,120.44},{69.38,142.25},{70.31,135.35},{15.31,70.37},
{57.01,108.61},{28.82,72.69},{91.81,143.65},{10.36,40.88},
{76.55,140.72},{ 0.16,43.74},{51.14,84.90},{69.35,127.84},
{48.41,117.74},{ 3.20,31.09},{69.39,130.73},{48.14,95.01},
{86.38,130.90},{95.01,157.76},{36.10,80.19},{81.70,157.52},
{74.01,130.55},{52.70,98.00},{47.05,92.58},{18.86,73.69},
{99.81,177.39},{42.58,85.86},{87.36,143.96},{88.59,146.86},
{11.78,46.42},{38.22,69.07},{45.15,91.00},{95.72,160.53},
{79.70,151.23},{26.54,60.76},{34.48,65.17},{ 1.30,42.50},
{16.02,50.61},{16.89,39.35},{27.74,57.75},{84.22,145.18},
{93.30,158.41},{61.90,109.51},{95.01,151.90},{72.08,124.39},
{66.39,113.08},{35.20,94.10},{81.76,136.24},{76.62,147.22},
{10.27,34.46},{66.68,124.17},{14.11,45.76},{86.00,128.71},
{79.77,135.12},{97.39,156.73},{42.78,91.86},{37.94,79.97},
{78.91,139.30},{11.36,39.99},{89.52,176.96},{30.14,69.25},
{29.96,59.10},{56.44,84.61},{60.42,112.61},{ 0.46,18.37},
{84.28,151.89},{13.08,69.80},{54.32,103.93},{80.00,125.14},
{57.69,112.88},{64.95,109.12},{64.57,125.66},{ 6.81,40.09},
{ 0.33,42.09},{59.28,108.39},{89.30,142.41},{98.57,162.71},
{31.85,71.60},{18.61,44.87},{76.37,133.43},{83.08,145.78},
{11.84,40.90},{81.95,131.01},{75.93,124.21},{53.90,96.87},
{29.33,71.10},{24.52,63.89},{22.83,58.46},{34.00,90.81},
{42.13,71.98},{15.23,53.27},{27.19,66.93},{33.64,84.89},
{91.75,148.25},{80.39,133.27},{71.78,116.56},{57.73,109.62},
{18.23,68.03},{14.49,41.69},{75.37,135.10},{ 0.16,38.89},
{14.78,66.76},{47.19,80.92},{15.93,37.76},{35.52,57.09},
{43.08,81.20},{29.83,52.19},{72.29,131.16},{80.66,141.31},
{75.16,142.95},{37.29,104.63},{ 5.43,49.61},{85.20,136.08},
{68.02,128.15},{12.87,60.53},{83.30,124.03},{ 9.54,57.03},
{49.01,100.62},{83.58,134.65},{78.51,114.55},{21.09,65.51},
{50.09,84.70},{32.46,59.39},{48.97,87.15},{36.99,102.00},
{78.10,141.60},{84.68,146.88},{ 6.22,53.86},{99.89,164.32},
{86.09,127.43},{88.37,148.92},{16.25,56.35},{43.39,98.05},
{54.42,112.46},{65.59,112.13},{50.92,99.20},{22.42,58.63},
{92.66,158.65},{85.73,146.58},{84.56,141.25},{51.72,107.39},
{ 4.75,50.91},{42.75,106.40},{45.60,109.30},{67.87,119.60},
{29.04,83.16},{98.19,165.23},{39.40,95.26},{78.21,141.99},
{ 3.72,30.08},{54.79,93.37},{21.74,58.98},{32.19,90.10},
{ 2.66,16.32},{49.64,99.26},{15.16,50.22},{ 9.40,49.22},
{22.03,59.10},{25.18,68.57},{39.06,84.40},{73.64,121.03},
{83.12,148.85},{88.69,140.65},{12.37,56.02},{85.53,148.62},
{36.48,85.40},{94.91,154.71},{86.84,133.62},{89.87,146.77},
{79.92,144.33},{77.98,136.50},{21.25,49.98},{62.50,116.43},
{99.83,165.84},{32.41,76.30},{29.03,64.50},{42.86,97.78},
{47.12,110.56},{ 7.96,26.81},{82.98,142.09},{92.02,158.75},
{83.51,151.37},{42.28,85.65},{67.91,120.42},{73.50,103.47},
{37.39,82.91},{35.91,70.12},{44.67,105.13},{66.54,118.29},
{44.67,105.00},{38.97,83.57},{20.66,61.67},{42.93,87.86},
{71.60,122.37},{48.66,101.28},{11.56,45.94},{51.76,98.86},
{39.94,82.48},{60.05,102.64},{31.11,59.89},{80.29,146.27},
{44.89,83.13},{45.84,87.27},{82.98,129.23},{ 3.70,59.54},
{48.25,98.47},{97.44,161.37},{84.99,124.35},{20.17,64.51},
{95.99,138.53},{74.65,114.95},{62.69,122.51},{17.60,63.37},
{95.07,140.34},{85.11,153.88},{ 4.90,48.11},{98.32,149.20},
{80.94,147.58},{85.69,151.15},{75.77,136.52},{49.44,98.78},
{42.27,88.69},{31.41,59.81},{73.96,138.56},{ 4.67,40.61},
{ 6.35,19.09},{39.73,86.81},{ 7.24,47.64},{23.31,67.01},
{88.78,138.19},{88.67,136.16},{41.81,75.81},{67.60,128.29},
{88.26,154.91},{50.61,100.22},{13.98,26.01},{80.92,129.02},
{28.74,88.34},{12.36,44.23},{68.49,116.10},{55.66,102.21},
{93.29,154.69},{67.40,139.27},{71.30,134.93},{ 3.38,49.83},
{77.36,134.60},{17.47,55.60},{19.14,48.89},{39.04,94.50},
{21.94,62.29},{78.69,128.35},{71.75,118.48},{99.31,170.87},
{ 1.67,37.14},{61.28,106.12},{77.28,132.02},{42.39,94.44},
{16.77,52.25},{39.24,93.11},{53.08,89.75},{62.60,121.08},
{66.30,127.72},{73.39,111.45},{62.33,121.65},{92.20,141.36},
{62.74,109.55},{54.99,104.45},{23.84,53.39},{39.62,86.73},
{92.33,138.31},{ 3.53,16.56},{53.26,109.60},{86.69,139.40},
{40.71,76.31},{92.11,147.09},{67.87,119.73},{20.73,51.21},
{97.95,152.69},{72.81,129.82},{65.27,149.39},{57.35,103.25},
{22.47,71.81},{94.28,153.77},{98.91,157.04},{35.35,74.74}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
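// note: d_rms_error is launched below with 100 blocks of 10 threads, i.e. exactly
// n_data (= 1000) threads, one per data point; the per-point squared residuals
// written to error_sum_arr are then summed and square-rooted on the host.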
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=hipMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dm returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"hipMalloc on d_error_sum_arr returned %d %s\n",error, //371
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"hipMalloc on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = hipMemcpy(d_dm,dm,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dm returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc,dc,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dc returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data,sizeof(data), hipMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"hipMemcpy to d_data returned %d %s\n",error,
hipGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
hipLaunchKernelGGL(d_rms_error, dim3(100), dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error =hipMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
hipMemcpyDeviceToHost);
if(error){
fprintf(stderr,"hipMemcpy to error_sum returned %d %s\n",error,
hipGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
best_error_i = i;
}
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr,"hipFree on d_dm returned %d %s\n",error,
hipGetErrorString(error)); //453
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr,"hipFree on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr,"hipFree on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr,"hipFree on d_error_sum_arr returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
| cc6c41db0fcae718f69e3e4064c16ff27a99d5c6.cu | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linearregcuda linearreg_cuda.cu -lm
*
* To run:
* ./linearregcuda
*
*
*****************************************************************************/
typedef struct point_t{
double x;
double y;
}point_t;
int n_data = 1000;
__device__ int d_n_data =1000;
point_t data[] = {
{83.93,138.55},{72.52,122.82},{65.75,119.88},{72.44,139.81},
{65.08,112.81},{82.02,126.86},{73.57,124.36},{89.27,157.88},
{65.60,103.79},{82.17,120.94},{77.33,137.19},{85.58,142.59},
{74.61,134.50},{65.89,108.69},{72.11,132.32},{73.60,121.80},
{68.02,103.77},{13.40,59.66},{ 0.31,32.08},{53.48,96.97},
{45.68,81.56},{98.36,165.62},{46.28,94.74},{45.52,113.26},
{22.58,51.72},{72.86,126.99},{19.59,46.60},{87.54,128.13},
{16.09,51.26},{69.70,122.62},{33.05,86.34},{63.33,110.83},
{93.93,149.98},{ 0.92,27.15},{26.38,71.82},{57.05,98.26},
{16.21,61.96},{31.67,69.41},{13.43,59.64},{ 8.46,49.44},
{95.89,150.25},{95.43,167.23},{69.53,127.36},{34.46,77.43},
{60.35,107.34},{ 0.50,35.08},{90.66,157.80},{16.78,64.26},
{69.74,134.40},{ 0.25,41.74},{53.23,93.57},{80.17,135.71},
{ 6.54,33.28},{34.11,87.78},{ 2.86,19.95},{32.56,69.15},
{28.28,62.91},{33.17,90.85},{ 7.62,46.39},{68.16,115.15},
{92.35,147.88},{21.23,65.80},{47.14,85.59},{87.77,119.15},
{76.12,137.95},{37.49,74.92},{39.29,80.00},{44.71,90.70},
{42.10,104.10},{28.34,54.37},{50.45,85.02},{56.47,92.47},
{35.22,75.02},{88.57,166.80},{ 6.07,38.95},{63.48,118.90},
{76.77,142.52},{18.75,74.67},{28.78,74.43},{43.84,100.43},
{ 0.16,26.37},{29.21,65.85},{62.72,133.16},{39.25,66.23},
{85.05,147.58},{73.38,113.21},{46.34,82.59},{68.40,119.47},
{11.44,56.74},{19.95,65.39},{79.29,137.38},{79.80,142.68},
{52.15,112.49},{56.03,115.50},{67.16,115.66},{85.33,129.00},
{34.29,70.72},{97.74,151.99},{68.56,104.55},{49.97,71.59},
{46.38,71.61},{89.20,157.85},{54.90,98.79},{ 6.74,47.96},
{19.67,77.69},{55.14,108.36},{33.86,81.18},{10.20,43.16},
{42.03,89.50},{80.17,147.56},{85.07,142.96},{41.73,99.97},
{20.68,63.65},{73.40,144.82},{49.25,109.76},{27.89,69.11},
{80.33,146.00},{21.38,49.85},{62.95,118.61},{ 1.44,27.12},
{94.38,153.81},{83.01,152.04},{59.41,106.51},{95.76,149.08},
{ 5.90,30.95},{ 3.18,29.72},{11.16,48.38},{76.40,143.36},
{68.93,136.55},{62.07,107.83},{80.17,132.10},{58.48,106.66},
{69.13,107.67},{81.88,142.73},{81.70,137.70},{40.60,88.39},
{66.65,119.79},{22.91,68.87},{38.13,66.58},{69.03,130.53},
{90.24,155.14},{46.52,86.41},{14.99,56.92},{75.56,115.17},
{18.09,56.09},{30.52,78.03},{93.37,165.99},{ 4.26,31.92},
{30.94,81.34},{ 8.43,64.08},{42.79,99.67},{70.30,122.75},
{47.69,85.80},{64.77,104.36},{29.11,65.52},{46.10,87.98},
{12.27,52.14},{24.33,60.38},{ 0.52,37.89},{14.99,38.50},
{98.12,174.95},{26.33,62.86},{61.20,107.48},{ 0.21,50.65},
{85.41,153.44},{23.29,72.59},{44.86,98.04},{13.70,33.42},
{44.89,87.27},{69.07,121.71},{75.31,125.88},{70.98,124.66},
{74.63,129.20},{57.62,99.19},{ 4.93,47.07},{ 6.99,45.55},
{32.47,72.47},{61.36,108.31},{66.23,117.36},{74.85,147.03},
{27.07,61.46},{41.34,90.76},{90.58,162.86},{25.15,58.14},
{81.85,127.93},{56.80,107.64},{ 8.75,40.27},{13.11,51.97},
{47.54,99.42},{55.85,121.85},{91.99,149.87},{35.93,83.03},
{86.70,150.77},{57.26,104.50},{83.18,137.18},{62.85,111.72},
{66.38,100.51},{38.15,64.34},{78.34,130.14},{29.41,63.87},
{30.56,80.40},{24.52,64.02},{45.21,105.62},{ 6.72,39.72},
{11.67,56.33},{19.46,49.66},{40.88,93.53},{25.62,66.98},
{ 7.75,44.96},{39.61,73.33},{31.31,75.18},{31.02,67.79},
{ 0.84,26.65},{73.96,121.34},{56.48,94.16},{ 7.42,53.64},
{97.33,153.93},{76.93,139.36},{85.29,155.48},{14.61,36.13},
{60.27,98.15},{30.24,63.36},{96.60,167.11},{10.60,48.26},
{75.55,127.98},{47.79,99.14},{68.76,139.37},{49.20,85.70},
{70.12,126.18},{ 9.64,55.50},{65.00,110.46},{ 5.80,27.32},
{58.13,107.53},{80.65,125.69},{57.07,113.42},{ 2.46,36.25},
{34.52,74.94},{89.00,160.87},{13.12,47.48},{75.78,137.21},
{13.07,58.64},{91.07,156.47},{16.42,42.15},{44.37,90.97},
{16.50,57.42},{34.72,71.51},{ 2.49,35.96},{68.34,118.72},
{ 2.28,20.73},{52.53,104.94},{19.33,57.68},{46.63,87.49},
{ 4.04,51.35},{36.31,73.48},{75.46,121.14},{23.63,51.63},
{41.66,79.94},{94.64,150.47},{77.09,145.92},{87.76,142.38},
{ 5.90,31.17},{82.27,145.91},{81.11,132.03},{63.29,110.70},
{92.95,155.73},{82.20,138.97},{93.85,160.15},{43.27,86.28},
{32.35,81.14},{47.96,81.16},{50.27,112.29},{12.64,56.54},
{50.56,101.93},{28.53,76.30},{41.70,92.57},{ 2.14,38.56},
{47.53,90.55},{83.69,145.56},{63.20,114.75},{23.49,60.50},
{45.48,66.42},{ 1.44,47.74},{ 6.53,39.36},{35.93,81.55},
{39.04,71.04},{72.88,98.95},{17.30,57.22},{42.46,89.02},
{65.29,130.02},{43.40,81.51},{70.75,126.68},{85.97,140.38},
{71.37,116.03},{57.68,102.87},{48.73,109.81},{29.54,61.81},
{12.99,36.72},{58.51,100.83},{55.06,115.84},{41.27,81.68},
{95.46,139.08},{40.07,90.85},{64.05,117.87},{42.29,91.06},
{90.98,144.57},{29.16,68.37},{ 7.83,42.00},{48.06,86.27},
{94.48,157.82},{89.99,149.87},{35.20,71.98},{32.22,56.62},
{47.09,98.59},{85.54,127.16},{36.72,82.94},{36.01,75.92},
{ 6.90,29.41},{27.79,61.96},{90.41,151.25},{24.80,58.27},
{ 3.15,27.08},{41.65,78.81},{ 5.68,53.98},{ 1.34,37.18},
{26.34,60.24},{81.94,138.61},{61.67,106.42},{95.19,140.32},
{85.58,136.69},{43.53,78.15},{80.18,125.00},{97.18,162.47},
{72.37,144.27},{89.73,162.32},{41.43,77.31},{42.75,95.42},
{68.74,113.34},{15.64,34.64},{ 9.24,45.07},{37.89,102.87},
{10.48,59.05},{48.86,98.02},{ 5.63,36.69},{72.08,102.79},
{92.33,149.56},{16.73,45.24},{46.36,90.80},{24.83,68.06},
{80.48,150.63},{20.98,64.45},{80.63,143.90},{96.88,150.88},
{14.64,43.47},{42.90,102.30},{30.41,75.91},{46.39,119.63},
{76.53,131.83},{94.93,154.39},{28.47,66.64},{34.51,70.97},
{25.38,60.99},{88.59,121.79},{35.45,59.41},{73.00,131.11},
{24.03,33.96},{56.43,106.18},{ 3.47,57.03},{56.44,100.79},
{64.50,120.89},{76.78,127.02},{10.71,50.09},{31.62,71.31},
{67.89,135.90},{18.74,58.15},{25.99,64.30},{64.99,116.96},
{ 4.05,30.48},{73.02,134.72},{80.37,123.35},{14.33,51.49},
{83.51,133.53},{85.37,132.10},{21.45,63.42},{12.50,46.08},
{19.10,56.52},{14.77,50.66},{ 6.19,40.86},{79.33,138.71},
{67.03,135.44},{87.19,137.83},{18.56,64.58},{43.95,85.47},
{ 2.68,45.61},{19.80,69.46},{63.85,122.97},{32.66,74.13},
{22.52,54.51},{69.10,113.45},{96.13,152.68},{78.26,116.72},
{75.35,150.20},{40.42,65.87},{66.04,117.46},{61.18,125.02},
{91.26,158.82},{28.78,78.74},{83.82,149.69},{37.04,92.22},
{59.71,132.27},{86.71,155.44},{44.21,105.01},{64.64,124.00},
{81.08,126.04},{90.40,143.03},{28.99,63.60},{10.61,43.34},
{ 6.09,35.67},{86.80,146.80},{ 4.74,40.51},{22.16,75.61},
{95.42,160.87},{42.31,97.11},{79.45,127.72},{67.70,133.21},
{44.97,81.59},{62.14,106.48},{49.11,89.51},{23.15,49.57},
{69.43,117.25},{32.52,84.45},{38.18,88.52},{20.37,57.94},
{17.90,58.25},{87.61,149.04},{70.18,124.13},{59.29,111.64},
{81.08,119.48},{27.53,62.26},{75.90,147.53},{49.54,91.74},
{96.57,143.68},{26.88,62.99},{33.53,74.65},{13.78,60.12},
{77.20,136.76},{36.36,57.36},{69.37,121.64},{43.13,94.04},
{44.73,85.18},{59.19,93.87},{92.70,159.41},{57.89,110.56},
{ 4.97,43.11},{30.12,80.07},{67.29,115.70},{56.82,93.89},
{79.96,145.69},{ 0.24,32.68},{36.35,81.48},{53.27,96.14},
{58.14,119.24},{23.07,72.66},{21.82,85.44},{85.62,136.62},
{39.98,90.70},{66.47,132.12},{ 5.88,40.04},{16.93,54.79},
{88.44,147.54},{27.78,71.00},{17.21,45.88},{27.28,55.77},
{31.21,69.12},{53.56,102.01},{61.31,108.07},{53.82,89.92},
{59.13,107.76},{84.00,133.95},{48.37,91.23},{26.28,81.03},
{42.29,82.81},{49.67,106.51},{43.70,101.04},{80.43,129.00},
{ 1.48,19.30},{64.68,127.26},{71.79,142.68},{95.74,161.99},
{84.81,156.18},{65.74,124.08},{11.29,54.95},{69.57,113.88},
{67.90,111.80},{83.08,150.54},{24.01,75.08},{20.45,64.25},
{24.65,83.77},{ 4.36,28.30},{63.47,113.95},{50.57,116.85},
{58.74,91.76},{62.64,108.21},{80.94,144.01},{53.31,94.00},
{ 9.96,59.07},{86.10,141.11},{72.80,137.84},{64.29,106.70},
{87.24,148.45},{40.74,85.65},{ 7.67,57.36},{96.32,159.90},
{51.56,89.97},{ 6.07,38.84},{57.09,122.71},{57.50,109.01},
{55.54,122.09},{88.57,151.62},{ 2.29,33.75},{54.25,99.13},
{91.02,172.53},{37.02,65.41},{40.93,101.27},{35.47,84.82},
{81.22,138.00},{84.63,130.37},{45.65,88.35},{94.18,164.19},
{87.71,155.91},{ 9.19,48.52},{94.96,136.92},{71.01,110.69},
{68.79,128.29},{40.52,92.34},{ 8.73,47.66},{ 2.06,44.89},
{97.18,147.83},{34.26,60.40},{14.18,58.46},{52.38,117.49},
{14.48,62.38},{ 6.10,52.04},{81.05,130.25},{ 3.48,35.73},
{73.78,144.59},{10.79,42.76},{49.91,109.72},{79.61,129.90},
{27.31,72.39},{87.00,132.94},{36.59,100.57},{61.76,108.79},
{38.06,71.50},{64.91,136.85},{20.14,76.01},{45.45,94.73},
{61.91,97.69},{42.14,95.23},{76.44,140.26},{ 5.81,46.83},
{37.34,100.85},{87.30,150.03},{ 1.51,37.72},{92.90,134.24},
{95.33,136.51},{73.18,124.70},{98.83,150.63},{11.76,68.35},
{80.05,127.30},{68.51,126.88},{57.05,126.94},{83.51,147.29},
{87.99,139.28},{22.63,61.08},{61.78,110.76},{ 0.77,40.86},
{29.04,91.19},{22.65,62.93},{57.23,104.92},{51.11,118.38},
{55.48,118.49},{80.82,142.60},{94.31,142.94},{73.06,123.22},
{60.08,127.92},{20.41,42.15},{ 2.02,35.70},{26.47,62.19},
{78.80,131.93},{53.90,95.36},{87.28,153.11},{18.51,60.06},
{94.25,165.01},{77.61,124.99},{43.87,78.29},{81.91,129.31},
{49.34,93.72},{65.69,120.60},{82.54,135.66},{89.34,143.95},
{59.15,115.08},{16.27,42.15},{86.27,148.26},{40.22,84.98},
{56.84,113.68},{46.54,98.73},{69.43,129.22},{96.92,157.81},
{19.39,61.83},{98.25,174.25},{74.10,127.68},{31.81,79.36},
{10.55,34.88},{45.75,81.79},{69.75,120.53},{18.79,67.94},
{70.13,117.97},{19.16,60.13},{96.15,175.37},{88.82,174.16},
{22.49,63.91},{53.78,112.52},{87.44,135.97},{16.95,54.24},
{ 4.69,41.55},{86.09,155.43},{74.00,138.94},{56.21,101.64},
{22.33,73.11},{84.20,134.62},{39.29,88.79},{17.79,44.85},
{ 5.19,24.15},{ 0.42,24.56},{ 6.39,36.61},{28.84,75.02},
{ 7.40,27.75},{26.10,83.45},{88.61,152.71},{11.08,60.03},
{68.59,134.87},{93.87,158.37},{50.86,101.59},{24.78,59.72},
{ 3.28,42.54},{53.86,111.14},{76.42,149.08},{91.46,143.44},
{32.48,68.59},{64.87,118.38},{67.13,113.34},{83.72,123.72},
{50.74,82.96},{64.54,112.22},{68.64,112.28},{78.30,124.84},
{33.29,87.20},{96.31,149.51},{92.26,150.48},{46.82,101.00},
{56.04,95.14},{24.15,64.23},{53.74,105.95},{80.23,147.26},
{90.44,145.88},{25.85,50.78},{76.17,134.98},{30.77,89.23},
{76.70,141.78},{80.85,140.80},{31.00,80.52},{ 6.18,47.46},
{58.01,120.82},{92.60,153.36},{ 1.42,19.57},{32.41,72.42},
{69.28,133.70},{ 9.76,43.95},{91.50,160.92},{46.78,92.37},
{32.49,99.91},{67.14,122.63},{12.76,42.72},{72.32,137.30},
{84.35,130.13},{ 7.77,48.91},{51.27,89.46},{55.28,112.12},
{67.97,133.39},{31.13,55.41},{35.17,83.88},{69.97,125.33},
{32.83,67.16},{79.67,136.73},{44.47,105.74},{25.49,77.04},
{70.12,129.90},{26.96,65.71},{17.51,37.12},{90.31,162.05},
{ 4.09,41.32},{87.08,160.68},{72.63,134.35},{44.82,92.09},
{92.11,168.35},{21.54,52.57},{41.01,89.99},{63.90,109.83},
{88.17,154.35},{43.26,103.70},{73.85,153.34},{17.56,54.53},
{70.13,141.42},{81.79,130.61},{32.92,65.19},{13.86,56.05},
{65.98,120.44},{69.38,142.25},{70.31,135.35},{15.31,70.37},
{57.01,108.61},{28.82,72.69},{91.81,143.65},{10.36,40.88},
{76.55,140.72},{ 0.16,43.74},{51.14,84.90},{69.35,127.84},
{48.41,117.74},{ 3.20,31.09},{69.39,130.73},{48.14,95.01},
{86.38,130.90},{95.01,157.76},{36.10,80.19},{81.70,157.52},
{74.01,130.55},{52.70,98.00},{47.05,92.58},{18.86,73.69},
{99.81,177.39},{42.58,85.86},{87.36,143.96},{88.59,146.86},
{11.78,46.42},{38.22,69.07},{45.15,91.00},{95.72,160.53},
{79.70,151.23},{26.54,60.76},{34.48,65.17},{ 1.30,42.50},
{16.02,50.61},{16.89,39.35},{27.74,57.75},{84.22,145.18},
{93.30,158.41},{61.90,109.51},{95.01,151.90},{72.08,124.39},
{66.39,113.08},{35.20,94.10},{81.76,136.24},{76.62,147.22},
{10.27,34.46},{66.68,124.17},{14.11,45.76},{86.00,128.71},
{79.77,135.12},{97.39,156.73},{42.78,91.86},{37.94,79.97},
{78.91,139.30},{11.36,39.99},{89.52,176.96},{30.14,69.25},
{29.96,59.10},{56.44,84.61},{60.42,112.61},{ 0.46,18.37},
{84.28,151.89},{13.08,69.80},{54.32,103.93},{80.00,125.14},
{57.69,112.88},{64.95,109.12},{64.57,125.66},{ 6.81,40.09},
{ 0.33,42.09},{59.28,108.39},{89.30,142.41},{98.57,162.71},
{31.85,71.60},{18.61,44.87},{76.37,133.43},{83.08,145.78},
{11.84,40.90},{81.95,131.01},{75.93,124.21},{53.90,96.87},
{29.33,71.10},{24.52,63.89},{22.83,58.46},{34.00,90.81},
{42.13,71.98},{15.23,53.27},{27.19,66.93},{33.64,84.89},
{91.75,148.25},{80.39,133.27},{71.78,116.56},{57.73,109.62},
{18.23,68.03},{14.49,41.69},{75.37,135.10},{ 0.16,38.89},
{14.78,66.76},{47.19,80.92},{15.93,37.76},{35.52,57.09},
{43.08,81.20},{29.83,52.19},{72.29,131.16},{80.66,141.31},
{75.16,142.95},{37.29,104.63},{ 5.43,49.61},{85.20,136.08},
{68.02,128.15},{12.87,60.53},{83.30,124.03},{ 9.54,57.03},
{49.01,100.62},{83.58,134.65},{78.51,114.55},{21.09,65.51},
{50.09,84.70},{32.46,59.39},{48.97,87.15},{36.99,102.00},
{78.10,141.60},{84.68,146.88},{ 6.22,53.86},{99.89,164.32},
{86.09,127.43},{88.37,148.92},{16.25,56.35},{43.39,98.05},
{54.42,112.46},{65.59,112.13},{50.92,99.20},{22.42,58.63},
{92.66,158.65},{85.73,146.58},{84.56,141.25},{51.72,107.39},
{ 4.75,50.91},{42.75,106.40},{45.60,109.30},{67.87,119.60},
{29.04,83.16},{98.19,165.23},{39.40,95.26},{78.21,141.99},
{ 3.72,30.08},{54.79,93.37},{21.74,58.98},{32.19,90.10},
{ 2.66,16.32},{49.64,99.26},{15.16,50.22},{ 9.40,49.22},
{22.03,59.10},{25.18,68.57},{39.06,84.40},{73.64,121.03},
{83.12,148.85},{88.69,140.65},{12.37,56.02},{85.53,148.62},
{36.48,85.40},{94.91,154.71},{86.84,133.62},{89.87,146.77},
{79.92,144.33},{77.98,136.50},{21.25,49.98},{62.50,116.43},
{99.83,165.84},{32.41,76.30},{29.03,64.50},{42.86,97.78},
{47.12,110.56},{ 7.96,26.81},{82.98,142.09},{92.02,158.75},
{83.51,151.37},{42.28,85.65},{67.91,120.42},{73.50,103.47},
{37.39,82.91},{35.91,70.12},{44.67,105.13},{66.54,118.29},
{44.67,105.00},{38.97,83.57},{20.66,61.67},{42.93,87.86},
{71.60,122.37},{48.66,101.28},{11.56,45.94},{51.76,98.86},
{39.94,82.48},{60.05,102.64},{31.11,59.89},{80.29,146.27},
{44.89,83.13},{45.84,87.27},{82.98,129.23},{ 3.70,59.54},
{48.25,98.47},{97.44,161.37},{84.99,124.35},{20.17,64.51},
{95.99,138.53},{74.65,114.95},{62.69,122.51},{17.60,63.37},
{95.07,140.34},{85.11,153.88},{ 4.90,48.11},{98.32,149.20},
{80.94,147.58},{85.69,151.15},{75.77,136.52},{49.44,98.78},
{42.27,88.69},{31.41,59.81},{73.96,138.56},{ 4.67,40.61},
{ 6.35,19.09},{39.73,86.81},{ 7.24,47.64},{23.31,67.01},
{88.78,138.19},{88.67,136.16},{41.81,75.81},{67.60,128.29},
{88.26,154.91},{50.61,100.22},{13.98,26.01},{80.92,129.02},
{28.74,88.34},{12.36,44.23},{68.49,116.10},{55.66,102.21},
{93.29,154.69},{67.40,139.27},{71.30,134.93},{ 3.38,49.83},
{77.36,134.60},{17.47,55.60},{19.14,48.89},{39.04,94.50},
{21.94,62.29},{78.69,128.35},{71.75,118.48},{99.31,170.87},
{ 1.67,37.14},{61.28,106.12},{77.28,132.02},{42.39,94.44},
{16.77,52.25},{39.24,93.11},{53.08,89.75},{62.60,121.08},
{66.30,127.72},{73.39,111.45},{62.33,121.65},{92.20,141.36},
{62.74,109.55},{54.99,104.45},{23.84,53.39},{39.62,86.73},
{92.33,138.31},{ 3.53,16.56},{53.26,109.60},{86.69,139.40},
{40.71,76.31},{92.11,147.09},{67.87,119.73},{20.73,51.21},
{97.95,152.69},{72.81,129.82},{65.27,149.39},{57.35,103.25},
{22.47,71.81},{94.28,153.77},{98.91,157.04},{35.35,74.74}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=cudaMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error, //371
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = cudaMemcpy(d_dm,dm,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc,dc,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data,sizeof(data), cudaMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i],&d_dc[i],d_error_sum_arr,d_data);
cudaThreadSynchronize();
error =cudaMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
cudaGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
best_error_i = i;
}
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
cudaGetErrorString(error)); //453
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
|
66ef463a1e2291130a0e375b786d3a31f3f93c2b.hip | // !!! This is a file automatically generated by hipify!!!
// "mpi + full reduction on gpu + timing"
#ifdef GPU
#include <hip/hip_runtime.h>
#endif
#include <mpi.h>
#include <iostream>
#include <vector>
#include "mpierr.h"
#include <cmath>
#include <algorithm>
#include <sstream>
#include <string>
#include <set>
#include <numeric>
#include <ctime>
// switches:
// #GPU : enable GPU computation
// #NO_LOG: do not printout log messages
// #REDUCE_GPU: perform the final per-task reduction step on the GPU (otherwise it is done on the CPU)
// #DOUBLE_: double precision
// #MPI_RROBIN_: assume a round robin layout i.e process 0 -> node 0, process 1 -> node 1 ...
// #NO_GPU_MALLOC_TIME: do not take into account malloc time; usually this is part of an initialization step
// compilation with mvapich2:
// nvcc -L/apps/eiger/mvapich2/1.6/mvapich2-gnu/lib -I/apps/eiger/mvapich2/1.6/mvapich2-gnu/include \
// -libumad -lmpich -lpthread -lrdmacm -libverbs -arch=sm_20 -DGPU \
// ~/projects/gpu-training/trunk/cuda_exercises_ugo/resources/mpiscratch/mpicuda2.cu
// run:
// 1) w/o scheduler: mpiexec -np ... -hosts ... ./a.out
// 2) w/ scheduler: see mpi_cuda_pbs_ref.sh script
// note: when using mvapich2/1.6 and *not* going through the pbs scheduler it seems
// the default behavior is rrobin, using the pbs launch script the default
// behavior is "bunch" (as defined by the mvapich2 documentation)
// note: using single precision floats because that's the only supported type
// for atomics on CUDA 4
// note: experiment with different numbers of MPI tasks per GPU/node; using
// 256 Mi floats, 16 MPI tasks on two nodes (8 per node, 4 per GPU)
// CUDA fails to allocate memory for exactly one task on each node;
// everything works fine with the same data with 8 tasks (4 per node, 2 per GPU).
// note: it is possible to implement a discovery step to find the current MPI layout
// by checking if MPI rank 0 and 1 are on the same processor ("bunch" layout) or
// not ("scatter" layout)
//
// note: with CUDA 4.0 and architecture 2.0 atomics are available for single precision only!!!
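//
// illustrative sketch (not used by the program below): one possible
// implementation of the layout-discovery step mentioned above; it assumes
// at least two MPI ranks, must be called after MPI_Init, and simply compares
// the processor names of ranks 0 and 1 to distinguish a "bunch" layout from
// a "scatter" layout.
static bool mpi_layout_is_bunch() {
  char name[ MPI_MAX_PROCESSOR_NAME ]  = { '\0' };
  char other[ MPI_MAX_PROCESSOR_NAME ] = { '\0' };
  int len = 0, rank = 0;
  MPI_Get_processor_name( name, &len );
  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
  int same = 0;
  if( rank == 1 ) {
    // rank 1 sends its processor name to rank 0
    MPI_Send( name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, 0, MPI_COMM_WORLD );
  } else if( rank == 0 ) {
    // rank 0 receives it and compares it with its own name
    MPI_Recv( other, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE );
    same = ( std::string( name ) == other ) ? 1 : 0;
  }
  // every rank learns the result
  MPI_Bcast( &same, 1, MPI_INT, 0, MPI_COMM_WORLD );
  return same == 1;
}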
#ifndef DOUBLE_
typedef float real_t;
#define MPI_REAL_T_ MPI_FLOAT
#else
typedef double real_t;
#define MPI_REAL_T_ MPI_DOUBLE
#endif
#ifdef GPU
const int BLOCK_SIZE = 512;
//------------------------------------------------------------------------------
// partial dot product: each thread block generates one value stored into the out array
// final reduction step must be performed on the CPU
__global__ void partial_dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i >= N ) return;
cache[ threadIdx.x ] = 0.f;
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads();
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
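// note: the halving loop above is a shared-memory tree reduction: with
// BLOCK_SIZE = 8, for example, the active strides would be 4, 2, 1 and
// cache[ 0 ] ends up holding the sum of all eight per-thread partial products.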
//------------------------------------------------------------------------------
//Full on-gpu reduction
// each block atomically increments this variable when done
// performing the first reduction step
__device__ unsigned int count = 0;
// shared memory used by partial_dot and sum functions
// for temporary partial reductions; declare as global variable
// because used in more than one function
__shared__ real_t cache[ BLOCK_SIZE ];
// partial dot product: each thread block produces a single value
__device__ real_t partial_dot( const real_t* v1, const real_t* v2, int N, real_t* out ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i >= N ) return real_t( 0 );
cache[ threadIdx.x ] = 0.f;
// the threads in the thread block iterate over the entire domain; iteration happens
// whenever the total number of threads is lower than the domain size
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2;
}
return cache[ 0 ];
}
// sum all elements in array; array size assumed to be equal to number of blocks
__device__ real_t sum( const real_t* v ) {
cache[ threadIdx.x ] = 0.f;
int i = threadIdx.x;
// the threads in the thread block iterate over the entire domain
// of size == gridDim.x == total number of blocks; iteration happens
// whenever the number of threads in a thread block is lower than
// the total number of thread blocks
while( i < gridDim.x ) {
cache[ threadIdx.x ] += v[ i ];
i += blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2;
}
return cache[ 0 ];
}
// perform parallel dot product in two steps:
// 1) each block computes a single value and stores it into an array of size == number of blocks
// 2) the last block to finish step (1) performs a reduction on the array produced in the above step
// parameters:
// v1 first input vector
// v2 second input vector
// N size of input vector
// out output vector: size MUST be equal to the number of GPU blocks since it is used
// for partial reduction; result is at position 0
__global__ void dot_product_full_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
// true if last block to compute value
__shared__ bool lastBlock;
// each block computes a value
real_t r = partial_dot( v1, v2, N, out );
if( threadIdx.x == 0 ) {
// value is stored into output array by first thread of each block
out[ blockIdx.x ] = r;
// wait for value to be available to all the threads on the device
__threadfence();
// increment atomic counter and retrieve value
const unsigned int v = atomicInc( &count, gridDim.x );
// check if last block to perform computation
lastBlock = ( v == gridDim.x - 1 );
}
// the code below is executed by *all* threads in the block:
// make sure all the threads in the block access the correct value
// of the variable 'lastBlock'
__syncthreads();
// last block performs the final reduction step, which produces a single value
if( lastBlock ) {
r = sum( out );
if( threadIdx.x == 0 ) {
out[ 0 ] = r;
count = 0;
}
}
}
#endif
//------------------------------------------------------------------------------
int main( int argc, char** argv ) {
int numtasks = 0;
int task = 0;
// INIT ENV
MPI_( MPI_Init( &argc, &argv ) );
MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
MPI_( MPI_Comm_rank( MPI_COMM_WORLD, &task ) );
std::vector< char > nodeid( MPI_MAX_PROCESSOR_NAME, '\0' );
int len = 0;
MPI_( MPI_Get_processor_name( &nodeid[ 0 ], &len ) );
#ifdef MPI_RROBIN_
// RETRIEVE TOTAL NUMBER OF NODES USED, is there an easier way ?
// required to have each GPU assigned to the same number of processes
// on each node
const int SEND_NODE_TAG = 0x01;
//const int SEND_NUM_NODES = 0x10;
MPI_Request req;
MPI_( MPI_Isend( &nodeid[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, SEND_NODE_TAG,
MPI_COMM_WORLD, &req ) );
int node_count = -1;
if( task == 0 ) {
typedef std::set< std::string > NodeCount;
NodeCount ncount;
std::vector< char > n( MPI_MAX_PROCESSOR_NAME, '\0' );
MPI_Status s;
for( int r = 0; r != numtasks; ++r ) {
MPI_( MPI_Recv( &n[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, r, SEND_NODE_TAG,
MPI_COMM_WORLD, &s ) );
ncount.insert( &n[ 0 ] );
}
node_count = int( ncount.size() );
#ifndef NO_LOG
std::cout << "Number of nodes: " << node_count << std::endl;
#endif
}
// SEND INFORMATION USED FOR GPU <-> RANK MAPPING TO EACH PROCESS
// Option 1: use scatter, useful only to send per-process specific information like e.g
// the GPU to use. It is in general a more robust method to have the root process
// compute the rank -> gpu map
//std::vector< int > sendbuf( numtasks, node_count );
// MPI Scatter parameters: address of send buffer,
// per-receiving process receive buffer size,...
// send buffer size = num tasks x per-receiving-process buffer size
//MPI_( MPI_Scatter( &sendbuf[ 0 ], 1, MPI_INT, &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
// Option 2: simply broadcast the number of nodes
MPI_( MPI_Bcast( &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
#endif
// PER TASK DATA INIT - in the real world this is the place where data are read from file
// through the MPI_File_ functions or, less likely received from the root process
const int ARRAY_SIZE = 1024 * 1024 * 256;// * 1024 * 256; // 256 Mi floats x 2 == 2 GiB total storage
// @WARNING: ARRAY_SIZE must be evenly divisible by the number of MPI processes
const int PER_MPI_TASK_ARRAY_SIZE = ARRAY_SIZE / numtasks;
if( ARRAY_SIZE % numtasks != 0 && task == 0 ) {
std::cerr << ARRAY_SIZE << " must be evenly divisible by the number of mpi processes" << std::endl;
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
std::vector< real_t > v1( ARRAY_SIZE / numtasks, 0. );
std::vector< real_t > v2( ARRAY_SIZE / numtasks, 0. );
for( int i = 0; i != PER_MPI_TASK_ARRAY_SIZE; ++i ) {
v1[ i ] = 1;
v2[ i ] = 1;
}
std::vector< double > begins( numtasks );
std::vector< double > ends( numtasks );
double begin = clock();
MPI_( MPI_Gather( &begin, 1, MPI_DOUBLE, &begins[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
// PARALLEL DOT PRODUCT COMPUTATION
real_t partial_dot = 0.f;
#ifndef GPU
int t = 0;
for( t = 0; t != PER_MPI_TASK_ARRAY_SIZE; ++t ) {
partial_dot += v1[ t ] * v2[ t ];
}
//partial_dot = real_t( p );
#ifndef NO_LOG
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " size: " << PER_MPI_TASK_ARRAY_SIZE
<< ' ' << t << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
#endif
#else
// SELECT GPU = task % <num gpus on node>, note that with this
// approach it is possible to support nodes with different numbers of GPUs
int device_count = 0;
if( hipGetDeviceCount( &device_count ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipGetDeviceCount FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef MPI_RROBIN_
const int device = ( task / node_count ) % device_count;
#else
const int device = task % device_count;
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << "\tGPU: " << device << '\n';
std::cout << os.str(); os.flush();
}
#endif
if( hipSetDevice( device ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipSetDevice FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_begin = clock();
#endif
real_t* dev_v1 = 0;
real_t* dev_v2 = 0;
real_t* dev_dout = 0;
if( hipMalloc( &dev_v1, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( hipMalloc( &dev_v2, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// MOVE DATA TO GPU
if( hipMemcpy( dev_v1, &v1[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
hipMemcpyHostToDevice ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( hipMemcpy( dev_v2, &v2[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
hipMemcpyHostToDevice ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// INVOKE KERNEL
const int NUM_THREADS_PER_BLOCK = BLOCK_SIZE; // must match size of buffer used for reduction
const int NUM_BLOCKS = std::min( PER_MPI_TASK_ARRAY_SIZE / NUM_THREADS_PER_BLOCK,
0xffff ); // max number of blocks is 64k
const int PARTIAL_REDUCE_SIZE = NUM_BLOCKS;
if( hipMalloc( &dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE ) != hipSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << hipGetErrorString( hipGetLastError() ) << " hipMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifndef REDUCE_GPU
hipLaunchKernelGGL(( partial_dot_product_kernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
std::vector< real_t > rdot( PARTIAL_REDUCE_SIZE );
hipMemcpy( &rdot[ 0 ], dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE, hipMemcpyDeviceToHost );
partial_dot = std::accumulate( rdot.begin(), rdot.end(), 0.f );
#else
hipLaunchKernelGGL(( dot_product_full_kernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS_PER_BLOCK), 0, 0, dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
hipMemcpy( &partial_dot, dev_dout, sizeof( real_t ) * 1, hipMemcpyDeviceToHost );
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
}
#endif
#endif
// REDUCE (SUM) ALL ranks -> rank 0
real_t result = 0.;
MPI_( MPI_Reduce( &partial_dot, &result, 1, MPI_REAL_T_, MPI_SUM, 0, MPI_COMM_WORLD ) );
double end = clock();
MPI_( MPI_Gather( &end, 1, MPI_DOUBLE, &ends[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
const std::pair< double, double > minmax( *std::min_element( begins.begin(), begins.end() ),
*std::max_element( ends.begin(), ends.end() ) );
// IF RANK == 0 -> PRINT RESULT
if( task == 0 ) {
std::cout << "dot product result: " << result << std::endl;
std::cout << "time: " << ( minmax.second - minmax.first ) / CLOCKS_PER_SEC << 's' << std::endl;
}
#ifdef GPU
// RELEASE GPU RESOURCES
hipFree( dev_v1 );
hipFree( dev_v2 );
hipFree( dev_dout );
hipDeviceReset();
#endif
// RELEASE MPI RESOURCES
MPI_( MPI_Finalize() );
return 0;
}
| 66ef463a1e2291130a0e375b786d3a31f3f93c2b.cu | // "mpi + full reduction on gpu + timing"
#ifdef GPU
#include <cuda.h>
#endif
#include <mpi.h>
#include <iostream>
#include <vector>
#include "mpierr.h"
#include <cmath>
#include <algorithm>
#include <sstream>
#include <string>
#include <set>
#include <numeric>
#include <ctime>
// switches:
// #GPU : enable GPU computation
// #NO_LOG: do not printout log messages
// #REDUCE_CPU: perform final per-task reduction step on the CPU
// #DOUBLE_: double precision
// #MPI_RROBIN_: assume a round robin layout i.e process 0 -> node 0, process 1 -> node 1 ...
// #NO_GPU_MALLOC_TIME: do not take into account malloc time; usually this is part of an initialization step
// compilation with mvapich2:
// nvcc -L/apps/eiger/mvapich2/1.6/mvapich2-gnu/lib -I/apps/eiger/mvapich2/1.6/mvapich2-gnu/include \
// -libumad -lmpich -lpthread -lrdmacm -libverbs -arch=sm_20 -DGPU \
// ~/projects/gpu-training/trunk/cuda_exercises_ugo/resources/mpiscratch/mpicuda2.cu
// run:
// 1) w/o scheduler: mpiexec -np ... -hosts ... ./a.out
// 2) w/ scheduler: see mpi_cuda_pbs_ref.sh script
// note: when using mvapich2/1.6 and *not* going through the pbs scheduler it seems
// the default behavior is rrobin, using the pbs launch script the default
// behavior is "bunch" (as defined by the mvapich2 documentation)
// note: using single precision floats because that's the only supported type
// for atomics on CUDA 4
// note: experiment with different numbers of MPI tasks per GPU/node; using
// 256 Mi floats, 16 MPI tasks on two nodes (8 per node, 4 per GPU)
// CUDA fails to allocate memory for exactly one task on each node;
// everything works fine with the same data with 8 tasks (4 per node, 2 per GPU).
// note: it is possible to implement a discovery step to find the current MPI layout
// by checking if MPI rank 0 and 1 are on the same processor ("bunch" layout) or
// not ("scatter" layout)
//
// note: with CUDA 4.0 and architecture 2.0 atomics are available for single precision only!!!
#ifndef DOUBLE_
typedef float real_t;
#define MPI_REAL_T_ MPI_FLOAT
#else
typedef double real_t;
#define MPI_REAL_T_ MPI_DOUBLE
#endif
#ifdef GPU
const int BLOCK_SIZE = 512;
//------------------------------------------------------------------------------
// partial dot product: each thread block generates one value stored into the out array
// final reduction step must be performed on the CPU
__global__ void partial_dot_product_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i >= N ) return;
cache[ threadIdx.x ] = 0.f;
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads();
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
//------------------------------------------------------------------------------
//Full on-gpu reduction
// each block atomically increments this variable when done
// performing the first reduction step
__device__ unsigned int count = 0;
// shared memory used by partial_dot and sum functions
// for temporary partial reductions; declare as global variable
// because used in more than one function
__shared__ real_t cache[ BLOCK_SIZE ];
// partial dot product: each thread block produces a single value
__device__ real_t partial_dot( const real_t* v1, const real_t* v2, int N, real_t* out ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i >= N ) return real_t( 0 );
cache[ threadIdx.x ] = 0.f;
// the threads in the thread block iterate over the entire domain; iteration happens
// whenever the total number of threads is lower than the domain size
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2;
}
return cache[ 0 ];
}
// sum all elements in array; array size assumed to be equal to number of blocks
__device__ real_t sum( const real_t* v ) {
cache[ threadIdx.x ] = 0.f;
int i = threadIdx.x;
// the threads in the thread block iterate over the entire domain
// of size == gridDim.x == total number of blocks; iteration happens
// whenever the number of threads in a thread block is lower than
// the total number of thread blocks
while( i < gridDim.x ) {
cache[ threadIdx.x ] += v[ i ];
i += blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2;
}
return cache[ 0 ];
}
// perform parallel dot product in two steps:
// 1) each block computes a single value and stores it into an array of size == number of blocks
// 2) the last block to finish step (1) performs a reduction on the array produced in the above step
// parameters:
// v1 first input vector
// v2 second input vector
// N size of input vector
// out output vector: size MUST be equal to the number of GPU blocks since it is used
// for partial reduction; result is at position 0
__global__ void dot_product_full_kernel( const real_t* v1, const real_t* v2, int N, real_t* out ) {
// true if last block to compute value
__shared__ bool lastBlock;
// each block computes a value
real_t r = partial_dot( v1, v2, N, out );
if( threadIdx.x == 0 ) {
// value is stored into output array by first thread of each block
out[ blockIdx.x ] = r;
// wait for value to be available to all the threads on the device
__threadfence();
// increment atomic counter and retrieve value
const unsigned int v = atomicInc( &count, gridDim.x );
// check if last block to perform computation
lastBlock = ( v == gridDim.x - 1 );
}
// the code below is executed by *all* threads in the block:
// make sure all the threads in the block access the correct value
// of the variable 'lastBlock'
__syncthreads();
// last block performs the final reduction step, which produces a single value
if( lastBlock ) {
r = sum( out );
if( threadIdx.x == 0 ) {
out[ 0 ] = r;
count = 0;
}
}
}
#endif
//------------------------------------------------------------------------------
int main( int argc, char** argv ) {
int numtasks = 0;
int task = 0;
// INIT ENV
MPI_( MPI_Init( &argc, &argv ) );
MPI_( MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN ) );
MPI_( MPI_Comm_size( MPI_COMM_WORLD, &numtasks ) );
MPI_( MPI_Comm_rank( MPI_COMM_WORLD, &task ) );
std::vector< char > nodeid( MPI_MAX_PROCESSOR_NAME, '\0' );
int len = 0;
MPI_( MPI_Get_processor_name( &nodeid[ 0 ], &len ) );
#ifdef MPI_RROBIN_
// RETRIEVE TOTAL NUMBER OF NODES USED, is there an easier way ?
// required to have each GPU assigned to the same number of processes
// on each node
const int SEND_NODE_TAG = 0x01;
//const int SEND_NUM_NODES = 0x10;
MPI_Request req;
MPI_( MPI_Isend( &nodeid[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, SEND_NODE_TAG,
MPI_COMM_WORLD, &req ) );
int node_count = -1;
if( task == 0 ) {
typedef std::set< std::string > NodeCount;
NodeCount ncount;
std::vector< char > n( MPI_MAX_PROCESSOR_NAME, '\0' );
MPI_Status s;
for( int r = 0; r != numtasks; ++r ) {
MPI_( MPI_Recv( &n[ 0 ], MPI_MAX_PROCESSOR_NAME, MPI_CHAR, r, SEND_NODE_TAG,
MPI_COMM_WORLD, &s ) );
ncount.insert( &n[ 0 ] );
}
node_count = int( ncount.size() );
#ifndef NO_LOG
std::cout << "Number of nodes: " << node_count << std::endl;
#endif
}
// SEND INFORMATION USED FOR GPU <-> RANK MAPPING TO EACH PROCESS
// Option 1: use scatter, useful only to send per-process specific information like e.g
// the GPU to use. It is in general a more robust method to have the root process
// compute the rank -> gpu map
//std::vector< int > sendbuf( numtasks, node_count );
// MPI Scatter parameters: address of send buffer,
// per-receiving process receive buffer size,...
// send buffer size = num tasks x per-receiving-process buffer size
//MPI_( MPI_Scatter( &sendbuf[ 0 ], 1, MPI_INT, &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
// Option 2: simply broadcast the number of nodes
MPI_( MPI_Bcast( &node_count, 1, MPI_INT, 0, MPI_COMM_WORLD ) );
#endif
// PER TASK DATA INIT - in the real world this is the place where data are read from file
// through the MPI_File_ functions or, less likely received from the root process
const int ARRAY_SIZE = 1024 * 1024 * 256;// * 1024 * 256; // 256 Mi floats x 2 == 2 GiB total storage
// @WARNING: ARRAY_SIZE must be evenly divisible by the number of MPI processes
const int PER_MPI_TASK_ARRAY_SIZE = ARRAY_SIZE / numtasks;
if( ARRAY_SIZE % numtasks != 0 && task == 0 ) {
std::cerr << ARRAY_SIZE << " must be evenly divisible by the number of mpi processes" << std::endl;
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
std::vector< real_t > v1( ARRAY_SIZE / numtasks, 0. );
std::vector< real_t > v2( ARRAY_SIZE / numtasks, 0. );
for( int i = 0; i != PER_MPI_TASK_ARRAY_SIZE; ++i ) {
v1[ i ] = 1;
v2[ i ] = 1;
}
std::vector< double > begins( numtasks );
std::vector< double > ends( numtasks );
double begin = clock();
MPI_( MPI_Gather( &begin, 1, MPI_DOUBLE, &begins[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
// PARALLEL DOT PRODUCT COMPUTATION
real_t partial_dot = 0.f;
#ifndef GPU
int t = 0;
for( t = 0; t != PER_MPI_TASK_ARRAY_SIZE; ++t ) {
partial_dot += v1[ t ] * v2[ t ];
}
//partial_dot = real_t( p );
#ifndef NO_LOG
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " size: " << PER_MPI_TASK_ARRAY_SIZE
<< ' ' << t << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
#endif
#else
// SELECT GPU = task % <num gpus on node>, note that with this
// approach it is possible to support nodes with different numbers of GPUs
int device_count = 0;
if( cudaGetDeviceCount( &device_count ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaGetDeviceCount FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef MPI_RROBIN_
const int device = ( task / node_count ) % device_count;
#else
const int device = task % device_count;
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << "\tGPU: " << device << '\n';
std::cout << os.str(); os.flush();
}
#endif
if( cudaSetDevice( device ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaSetDevice FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_begin = clock();
#endif
real_t* dev_v1 = 0;
real_t* dev_v2 = 0;
real_t* dev_dout = 0;
if( cudaMalloc( &dev_v1, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( cudaMalloc( &dev_v2, sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifdef NO_GPU_MALLOC_TIME
double malloc_end = clock();
begin += malloc_end - malloc_begin;
#endif
// MOVE DATA TO GPU
if( cudaMemcpy( dev_v1, &v1[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
cudaMemcpyHostToDevice ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
if( cudaMemcpy( dev_v2, &v2[ 0 ], sizeof( real_t ) * PER_MPI_TASK_ARRAY_SIZE,
cudaMemcpyHostToDevice ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMemcpy FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
// INVOKE KERNEL
const int NUM_THREADS_PER_BLOCK = BLOCK_SIZE; // must match size of buffer used for reduction
const int NUM_BLOCKS = std::min( PER_MPI_TASK_ARRAY_SIZE / NUM_THREADS_PER_BLOCK,
0xffff ); // max number of blocks is 64k
const int PARTIAL_REDUCE_SIZE = NUM_BLOCKS;
if( cudaMalloc( &dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE ) != cudaSuccess ) {
std::cerr << task << ' ' << __LINE__ << ' ' << cudaGetErrorString( cudaGetLastError() ) << " cudaMalloc FAILED\n";
MPI_( MPI_Abort( MPI_COMM_WORLD, 1 ) );
return 1;
}
#ifndef REDUCE_GPU
partial_dot_product_kernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>( dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
std::vector< real_t > rdot( PARTIAL_REDUCE_SIZE );
cudaMemcpy( &rdot[ 0 ], dev_dout, sizeof( real_t ) * PARTIAL_REDUCE_SIZE, cudaMemcpyDeviceToHost );
partial_dot = std::accumulate( rdot.begin(), rdot.end(), 0.f );
#else
dot_product_full_kernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>( dev_v1, dev_v2, PER_MPI_TASK_ARRAY_SIZE, dev_dout );
cudaMemcpy( &partial_dot, dev_dout, sizeof( real_t ) * 1, cudaMemcpyDeviceToHost );
#endif
#ifndef NO_LOG
{
std::ostringstream os;
os << &nodeid[ 0 ] << " - rank: " << task << " partial dot: " << partial_dot << '\n' ;
std::cout << os.str(); os.flush();
}
#endif
#endif
// REDUCE (SUM) ALL ranks -> rank 0
real_t result = 0.;
MPI_( MPI_Reduce( &partial_dot, &result, 1, MPI_REAL_T_, MPI_SUM, 0, MPI_COMM_WORLD ) );
double end = clock();
MPI_( MPI_Gather( &end, 1, MPI_DOUBLE, &ends[ 0 ], 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ) );
const std::pair< double, double > minmax( *std::min_element( begins.begin(), begins.end() ),
*std::max_element( ends.begin(), ends.end() ) );
// IF RANK == 0 -> PRINT RESULT
if( task == 0 ) {
std::cout << "dot product result: " << result << std::endl;
std::cout << "time: " << ( minmax.second - minmax.first ) / CLOCKS_PER_SEC << 's' << std::endl;
}
#ifdef GPU
// RELEASE GPU RESOURCES
cudaFree( dev_v1 );
cudaFree( dev_v2 );
cudaFree( dev_dout );
cudaDeviceReset();
#endif
// RELEASE MPI RESOURCES
MPI_( MPI_Finalize() );
return 0;
}
|
948629edcf84198f9562f7e033f1bcd73fb537e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorRandom.h"
#include "THHDeviceUtils.cuh"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHTensorMath.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorRandom.cuh"
#include <thrust/functional.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_mtgp32_host.h>
#include <rocrand/rocrand_mtgp32_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
Generator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, Generator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params_t)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(Generator* gen, unsigned long long seed)
{
if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (hiprandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTGP32 states and the seed.
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, hipMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
}
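/* layout of rng_state after this call: the first MAX_NUM_BLOCKS *
   sizeof(hiprandStateMtgp32_t) bytes hold the per-block MTGP32 states, followed
   by sizeof(gen->initial_seed) bytes holding the seed; THCRandom_setRNGState
   below expects exactly this layout. */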
__global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state),
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
}
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, hiprand_uniform, x * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, x * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, hiprand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(1-x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(1-x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, hiprand_uniform, (ScalarConvert<float, half>::to(x * (b-a) + a)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(1-x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THHGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
| 948629edcf84198f9562f7e033f1bcd73fb537e4.cu | #include "THCTensorRandom.h"
#include "THCDeviceUtils.cuh"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCTensorMath.h"
#include "THCReduceApplyUtils.cuh"
#include "THCTensorRandom.cuh"
#include <thrust/functional.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
#include <curand_mtgp32dc_p_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
Generator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, Generator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(Generator* gen, unsigned long long seed)
{
if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (curandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTGP32 states and the seed.
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, cudaMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
}
__global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
Generator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t total_size = states_size + seed_size;
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, cudaMemcpyHostToDevice));
set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
}
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
GENERATE_KERNEL2(generate_uniform, float, double a, double b, float, curand_uniform, x * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, x * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. / lambda * log(1-x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(1-x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, curand_uniform, (ScalarConvert<float, half>::to(x * (b-a) + a)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, curand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(1-x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THCGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
|
980d8716bdf6a94c3343eb44939e6088bd10a10f.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*
* (C) Copyright 2007 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <malloc.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "file.h"
#include "computeQ.hip"
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
hipMalloc ((void **) &dev_ptr, num * size);
HIP_ERRCK;
hipMemcpy (dev_ptr, host_ptr, num * size, hipMemcpyHostToDevice);
HIP_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
hipMemcpy (host_ptr, dev_ptr, num * size, hipMemcpyDeviceToHost);
HIP_ERRCK;
hipFree(dev_ptr);
HIP_ERRCK;
}
int main (int argc, char *argv[]) {
char* inputFileName = argv[1];
char* outputFileName = argv[2];
int numX, numK; /* Number of X and K values */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
/* Read in data */
inputData(inputFileName,
&numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
printf("%d pixels in output; %d samples in trajectory\n", numX, numK);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
auto start = std::chrono::steady_clock::now();
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
hipMalloc((void **)&phiMag_d, numK * sizeof(float));
HIP_ERRCK;
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
hipFree(phiR_d);
hipFree(phiI_d);
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("computePhiMag execution time: %f s\n", time * 1e-9);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
/* GPU section 2 */
start = std::chrono::steady_clock::now();
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
hipMalloc((void **)&Qr_d, numX * sizeof(float));
HIP_ERRCK;
hipMemset((void *)Qr_d, 0, numX * sizeof(float));
hipMalloc((void **)&Qi_d, numX * sizeof(float));
HIP_ERRCK;
hipMemset((void *)Qi_d, 0, numX * sizeof(float));
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("computeQ execution time: %f s\n", time * 1e-9);
outputData(outputFileName, Qr, Qi, numX);
free(phiMag);
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
return 0;
}
| 980d8716bdf6a94c3343eb44939e6088bd10a10f.cu | /***************************************************************************
*
* (C) Copyright 2007 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <malloc.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "file.h"
#include "computeQ.cu"
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
hipMalloc ((void **) &dev_ptr, num * size);
HIP_ERRCK;
hipMemcpy (dev_ptr, host_ptr, num * size, hipMemcpyHostToDevice);
HIP_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
hipMemcpy (host_ptr, dev_ptr, num * size, hipMemcpyDeviceToHost);
HIP_ERRCK;
hipFree(dev_ptr);
HIP_ERRCK;
}
int main (int argc, char *argv[]) {
char* inputFileName = argv[1];
char* outputFileName = argv[2];
int numX, numK; /* Number of X and K values */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
/* Read in data */
inputData(inputFileName,
&numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
printf("%d pixels in output; %d samples in trajectory\n", numX, numK);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
auto start = std::chrono::steady_clock::now();
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
hipMalloc((void **)&phiMag_d, numK * sizeof(float));
HIP_ERRCK;
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
hipFree(phiR_d);
hipFree(phiI_d);
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("computePhiMag execution time: %f s\n", time * 1e-9);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
/* GPU section 2 */
start = std::chrono::steady_clock::now();
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
hipMalloc((void **)&Qr_d, numX * sizeof(float));
HIP_ERRCK;
hipMemset((void *)Qr_d, 0, numX * sizeof(float));
hipMalloc((void **)&Qi_d, numX * sizeof(float));
HIP_ERRCK;
hipMemset((void *)Qi_d, 0, numX * sizeof(float));
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("computeQ execution time: %f s\n", time * 1e-9);
outputData(outputFileName, Qr, Qi, numX);
free(phiMag);
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
return 0;
}
|
caf6d2cd532e07cb318cad1e8516f1b6ae481867.hip | // !!! This is a file automatically generated by hipify!!!
/*CWM HPC Part B Assignment: Monte Carlo Method for calculating pi value on GPU
2021/5/58 Jianhao Yuan */
// reference: https://blog.csdn.net/ichocolatekapa/article/details/18960223
//import libs
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
//hiprand for random points generate
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//Define constants (use 256 threads, and max trial times: 2000000)
#define MAX_THREAD 256
#define MAX_COUNT 2000000
//Kernel
__global__ void get_pi(float *res,int *count){
//declare variables:
//initial # points in 1/4 circle; total number of random point generated: n; loop index:i
int a=0, index_x = threadIdx.x, n = *count,i;
// declare coordinate variables x,y
float x, y;
// result for pi record
res += index_x;
//use hiprand to get random points
hiprandState_t s;
hiprand_init(42, index_x, 0, &s);
for (i = 1; i <= n; i++) {
//random generate in 1*1 square
x = hiprand_uniform(&s);
y = hiprand_uniform(&s);
//count in if point locate in 1/4 circle
if (pow(x, 2) + pow(y, 2) <= 1) {
a++;
}
//get pi value
*res = 4 * (float)a / (float)n;
//synchronzie threads
__syncthreads();
}
}
int main(void){
// declare variables: host pi value, device pi value, actual pi value, error between
float *h_pi, *d_pi, pi=0, err;
//count(both host&device);loop index needed
int maxThread = MAX_THREAD, *h_count, *d_count, i;
//allocate memory for host
h_pi = (float *)malloc(sizeof(float) * maxThread);
h_count = (int *)malloc(sizeof(int) * 1);
//allocate memory for device
hipMalloc((void **)&d_pi, sizeof(float) * maxThread);
hipMalloc((void **)&d_count, sizeof(int) * 1);
//initialize count number on host
h_count[0] = MAX_COUNT;
//get count value to device
hipMemcpy(d_count, h_count, sizeof(int) * 1, hipMemcpyHostToDevice);
//execute kernel
hipLaunchKernelGGL(( get_pi), dim3(1), dim3(maxThread), 0, 0, d_pi, d_count);
//get pi value back to host
hipMemcpy(h_pi, d_pi, sizeof(float) * maxThread,hipMemcpyDeviceToHost);
//average over all threads
for (i = 0; i < maxThread; i++) pi += h_pi[i];
pi = pi / maxThread;
//Find error
err = pi - (float)M_PI;
if (err < 0) {
err = -err;
}
//print output
printf("Points: %d, Generated : %f, Error: %.0fe-6\n",h_count[0] * maxThread, pi, err * 1000000);
//free memory on host
free(h_pi);
free(h_count);
//free memory on device
hipFree(d_pi);
hipFree(d_count);
//end
return 0;
}
| caf6d2cd532e07cb318cad1e8516f1b6ae481867.cu | /*CWM HPC Part B Assignment: Monte Carlo Method for calculating pi value on GPU
2021/5/58 Jianhao Yuan */
// reference: https://blog.csdn.net/ichocolatekapa/article/details/18960223
//import libs
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//curand for random points generate
#include <curand.h>
#include <curand_kernel.h>
//Define constants (use 256 threads, and max trial times: 2000000)
#define MAX_THREAD 256
#define MAX_COUNT 2000000
//Kernel
__global__ void get_pi(float *res,int *count){
//declare variables:
//initial # points in 1/4 circle; total number of random point generated: n; loop index:i
int a=0, index_x = threadIdx.x, n = *count,i;
// declare coordinate variables x,y
float x, y;
// result for pi record
res += index_x;
//use curand to get random points
curandState s;
curand_init(42, index_x, 0, &s);
for (i = 1; i <= n; i++) {
//random generate in 1*1 square
x = curand_uniform(&s);
y = curand_uniform(&s);
//count in if point locate in 1/4 circle
if (pow(x, 2) + pow(y, 2) <= 1) {
a++;
}
//get pi value
*res = 4 * (float)a / (float)n;
//synchronzie threads
__syncthreads();
}
}
int main(void){
// declare variables: host pi value, device pi value, actual pi value, error between
float *h_pi, *d_pi, pi=0, err;
//count(both host&device);loop index needed
int maxThread = MAX_THREAD, *h_count, *d_count, i;
//allocate memory for host
h_pi = (float *)malloc(sizeof(float) * maxThread);
h_count = (int *)malloc(sizeof(int) * 1);
//allocate memory for device
cudaMalloc((void **)&d_pi, sizeof(float) * maxThread);
cudaMalloc((void **)&d_count, sizeof(int) * 1);
//initialize count number on host
h_count[0] = MAX_COUNT;
//get count value to device
cudaMemcpy(d_count, h_count, sizeof(int) * 1, cudaMemcpyHostToDevice);
//execute kernel
get_pi<<<1, maxThread>>> (d_pi, d_count);
//get pi value back to host
cudaMemcpy(h_pi, d_pi, sizeof(float) * maxThread,cudaMemcpyDeviceToHost);
//average over 512 threads
for (i = 0; i < maxThread; i++) pi += h_pi[i];
pi = pi / maxThread;
//Find error
err = pi - (float)M_PI;
if (err < 0) {
err = -err;
}
//print output
printf("Points: %d, Generated π: %f, Error: %.0fe-6\n",h_count[0] * maxThread, pi, err * 1000000);
//free memory on host
free(h_pi);
free(h_count);
//free memory on device
cudaFree(d_pi);
cudaFree(d_count);
//end
return 0;
}
|
4e8992a82a0e59d70665ce60c2a2f083c07ab16f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define N 100 // total number of items in vectors
#define nthreads 4 // total number of threads in a block
__global__ void estimatepi(int n, int *sum)
{
__shared__ int counter[nthreads];
int threadID;
threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int seed = threadID;
hiprandState_t s;
hiprand_init(seed, 0, 0, &s);
if(threadID < n){
double x, y, diff, angle;
int t;
counter[threadIdx.x] = 0;
for (t = 0; t<n; t++){
x = hiprand_uniform(&s); //hiprand
y = hiprand_uniform(&s); //hiprand
while(x*x + y*y > 1){
x = hiprand_uniform(&s); //hiprand
y = hiprand_uniform(&s); //rand
}
angle = atan2 ( y, x ); //use inverse tan;
diff = hiprand_uniform(&s);
if(diff <= sin (angle) *2){
counter[threadIdx.x] = counter[threadIdx.x] + 1;
}
}
if(threadIdx.x == 0){
sum[blockIdx.x] = 0;
for(int i=0; i<nthreads; i++) {
sum[blockIdx.x] = sum[blockIdx.x] + counter[i];
}
}
}
}
int main()
{
srand(time(NULL));
int *sum_h;
int *sum_d;
sum_h = (int*)malloc( N* sizeof(int));
hipMalloc((void**)&sum_d, N * sizeof(int));
int nblocks = (N + nthreads - 1)/nthreads;
hipLaunchKernelGGL(( estimatepi), dim3(nblocks),dim3(nthreads), 0, 0, N,sum_d);
hipMemcpy(sum_h, sum_d, N * sizeof(int), hipMemcpyDeviceToHost);
int success = 0;
for(int i = 0; i < nblocks; i++){
success = sum_h[i] + success;
}
printf("trials === %d", N * nblocks * nthreads );
printf(" success === %d\n", success);
double pi_estimate = 2 * N * nthreads * nblocks/( double )success;
printf("pi_estimate == %f", pi_estimate);
printf("\n");
hipFree(sum_d);
free(sum_h);
}
| 4e8992a82a0e59d70665ce60c2a2f083c07ab16f.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand_kernel.h>
#define N 100 // total number of items in vectors
#define nthreads 4 // total number of threads in a block
__global__ void estimatepi(int n, int *sum)
{
__shared__ int counter[nthreads];
int threadID;
threadID = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int seed = threadID;
curandState s;
curand_init(seed, 0, 0, &s);
if(threadID < n){
double x, y, diff, angle;
int t;
counter[threadIdx.x] = 0;
for (t = 0; t<n; t++){
x = curand_uniform(&s); //curand
y = curand_uniform(&s); //curand
while(x*x + y*y > 1){
x = curand_uniform(&s); //curand
y = curand_uniform(&s); //rand
}
angle = atan2 ( y, x ); //use inverse tan;
diff = curand_uniform(&s);
if(diff <= sin (angle) *2){
counter[threadIdx.x] = counter[threadIdx.x] + 1;
}
}
if(threadIdx.x == 0){
sum[blockIdx.x] = 0;
for(int i=0; i<nthreads; i++) {
sum[blockIdx.x] = sum[blockIdx.x] + counter[i];
}
}
}
}
int main()
{
srand(time(NULL));
int *sum_h;
int *sum_d;
sum_h = (int*)malloc( N* sizeof(int));
cudaMalloc((void**)&sum_d, N * sizeof(int));
int nblocks = (N + nthreads - 1)/nthreads;
estimatepi<<<nblocks,nthreads>>>(N,sum_d);
cudaMemcpy(sum_h, sum_d, N * sizeof(int), cudaMemcpyDeviceToHost);
int success = 0;
for(int i = 0; i < nblocks; i++){
success = sum_h[i] + success;
}
printf("trials === %d", N * nblocks * nthreads );
printf(" success === %d\n", success);
double pi_estimate = 2 * N * nthreads * nblocks/( double )success;
printf("pi_estimate == %f", pi_estimate);
printf("\n");
cudaFree(sum_d);
free(sum_h);
}
|
e82ddd7a4355e3aca0a8d72cf2bb341f3a323439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacobian_kernel.h"
#define BLOCK 32
__global__
void laplacePDE(float *d_in, float *d_temp, int numRows, int numCols, float *d_error){
/*
Your kernel here: Make sure to check for boundary conditions
*/
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int Offset = y * numCols + x;
for (int i = 0; i < 100; ++i){
if (x < numCols - 1 && x > 0 && y < numRows - 1 && y > 0){
d_temp[Offset] = (d_in[(y - 1) * numCols + x] + d_in[y * numCols + x - 1] + d_in[y * numCols + x + 1] + d_in[(y + 1) * numCols + x]) / 4;
}
__syncthreads();
if (x < numCols && y < numRows){
d_error[0] += abs(d_temp[Offset] - d_in[Offset]);
__syncthreads();
d_in[Offset] = d_temp[Offset];
}
if (d_error[0] < 0.000001f)
break;
}
}
void launch_jacobian(float* d_in, float* d_temp, const int numRows, const int numCols, float* d_error){
// configure launch params here
dim3 block(BLOCK, BLOCK, 1);
dim3 grid((numCols-1)/BLOCK + 1, (numRows-1)/BLOCK + 1, 1);
hipLaunchKernelGGL(( laplacePDE), dim3(grid),dim3(block), 0, 0, d_in, d_temp, numRows, numCols, d_error);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
} | e82ddd7a4355e3aca0a8d72cf2bb341f3a323439.cu | #include "jacobian_kernel.h"
#define BLOCK 32
__global__
void laplacePDE(float *d_in, float *d_temp, int numRows, int numCols, float *d_error){
/*
Your kernel here: Make sure to check for boundary conditions
*/
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int Offset = y * numCols + x;
for (int i = 0; i < 100; ++i){
if (x < numCols - 1 && x > 0 && y < numRows - 1 && y > 0){
d_temp[Offset] = (d_in[(y - 1) * numCols + x] + d_in[y * numCols + x - 1] + d_in[y * numCols + x + 1] + d_in[(y + 1) * numCols + x]) / 4;
}
__syncthreads();
if (x < numCols && y < numRows){
d_error[0] += abs(d_temp[Offset] - d_in[Offset]);
__syncthreads();
d_in[Offset] = d_temp[Offset];
}
if (d_error[0] < 0.000001f)
break;
}
}
void launch_jacobian(float* d_in, float* d_temp, const int numRows, const int numCols, float* d_error){
// configure launch params here
dim3 block(BLOCK, BLOCK, 1);
dim3 grid((numCols-1)/BLOCK + 1, (numRows-1)/BLOCK + 1, 1);
laplacePDE<<<grid,block>>>(d_in, d_temp, numRows, numCols, d_error);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
} |
cf3a1ae085924ef98331e4cbbe2f385f1c6425ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
template <int block_size, bool has_nulls>
__launch_bounds__(block_size) __global__ void compute_mixed_join_output_size_semi(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row)
{
// The (required) extern storage of the shared memory array leads to
// conflicting declarations between different templates. The easiest
// workaround is to declare an arbitrary (here char) array type then cast it
// after the fact to the appropriate type.
extern __shared__ char raw_intermediate_storage[];
cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
auto thread_intermediate_storage =
intermediate_storage + (threadIdx.x * device_expression_data.num_intermediates);
std::size_t thread_counter{0};
cudf::size_type const start_idx = threadIdx.x + blockIdx.x * block_size;
cudf::size_type const stride = block_size * gridDim.x;
cudf::size_type const left_num_rows = left_table.num_rows();
cudf::size_type const right_num_rows = right_table.num_rows();
auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
left_table, right_table, device_expression_data);
// TODO: Address asymmetry in operator.
auto equality = single_expression_equality<has_nulls>{
evaluator, thread_intermediate_storage, swap_tables, equality_probe};
for (cudf::size_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
outer_row_index += stride) {
matches_per_row[outer_row_index] =
((join_type == join_kind::LEFT_ANTI_JOIN) !=
(hash_table_view.contains(outer_row_index, hash_probe, equality)));
thread_counter += matches_per_row[outer_row_index];
}
using BlockReduce = hipcub::BlockReduce<cudf::size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
std::size_t block_counter = BlockReduce(temp_storage).Sum(thread_counter);
// Add block counter to global counter
if (threadIdx.x == 0) {
cuda::atomic_ref<std::size_t, cuda::thread_scope_device> ref{*output_size};
ref.fetch_add(block_counter, cuda::std::memory_order_relaxed);
}
}
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
} // namespace detail
} // namespace cudf
| cf3a1ae085924ef98331e4cbbe2f385f1c6425ea.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
template <int block_size, bool has_nulls>
__launch_bounds__(block_size) __global__ void compute_mixed_join_output_size_semi(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row)
{
// The (required) extern storage of the shared memory array leads to
// conflicting declarations between different templates. The easiest
// workaround is to declare an arbitrary (here char) array type then cast it
// after the fact to the appropriate type.
extern __shared__ char raw_intermediate_storage[];
cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
auto thread_intermediate_storage =
intermediate_storage + (threadIdx.x * device_expression_data.num_intermediates);
std::size_t thread_counter{0};
cudf::size_type const start_idx = threadIdx.x + blockIdx.x * block_size;
cudf::size_type const stride = block_size * gridDim.x;
cudf::size_type const left_num_rows = left_table.num_rows();
cudf::size_type const right_num_rows = right_table.num_rows();
auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
left_table, right_table, device_expression_data);
// TODO: Address asymmetry in operator.
auto equality = single_expression_equality<has_nulls>{
evaluator, thread_intermediate_storage, swap_tables, equality_probe};
for (cudf::size_type outer_row_index = start_idx; outer_row_index < outer_num_rows;
outer_row_index += stride) {
matches_per_row[outer_row_index] =
((join_type == join_kind::LEFT_ANTI_JOIN) !=
(hash_table_view.contains(outer_row_index, hash_probe, equality)));
thread_counter += matches_per_row[outer_row_index];
}
using BlockReduce = cub::BlockReduce<cudf::size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
std::size_t block_counter = BlockReduce(temp_storage).Sum(thread_counter);
// Add block counter to global counter
if (threadIdx.x == 0) {
cuda::atomic_ref<std::size_t, cuda::thread_scope_device> ref{*output_size};
ref.fetch_add(block_counter, cuda::std::memory_order_relaxed);
}
}
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
template __global__ void compute_mixed_join_output_size_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_hash const hash_probe,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
ast::detail::expression_device_view device_expression_data,
bool const swap_tables,
std::size_t* output_size,
cudf::device_span<cudf::size_type> matches_per_row);
} // namespace detail
} // namespace cudf
|
296175f232005248f0fe40bb717c1c8158311884.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelUpdateNablaB(float *nabla_b,float *delta_nabla_b) {
nabla_b[threadIdx.x]+=delta_nabla_b[threadIdx.x];
} | 296175f232005248f0fe40bb717c1c8158311884.cu | #include "includes.h"
__global__ void kernelUpdateNablaB(float *nabla_b,float *delta_nabla_b) {
nabla_b[threadIdx.x]+=delta_nabla_b[threadIdx.x];
} |
c386c207541b505173c77aaafa59ae27cbc4ce64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
} | c386c207541b505173c77aaafa59ae27cbc4ce64.cu | #include "includes.h"
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
} |
9b6044fc5a712be33c6b5e4b0a7da422b5495d27.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "def_hip.cuh"
#include <hip/hip_runtime.h>
__device__ long edep_index(long x, long y, long z) {
return x*(ny+2)*(nz+2) + y*(nz+2) + z;
}
__device__ double square(double x){
return x*x;
}
// Piecewise linear interpolation
// Use binary search to find the segment
// Ref: https://software.llnl.gov/yorick-doc/qref/qrfunc09.html
__device__ double interp_cuda(double *y, double *x, const double xp, int n)
{
unsigned low, high, mid;
if (x[0] <= x[n-1]) {
// x is increasing
if (xp <= x[0])
return y[0];
else if (xp >= x[n-1])
return y[n-1];
low = 0;
high = n - 1;
mid = (low + high) >> 1;
while (low < high - 1) {
if (x[mid] >= xp)
high = mid;
else
low = mid;
mid = (low + high) >> 1;
}
//assert((xp >= x[mid]) && (xp <= x[mid + 1]));
return y[mid] +
(y[mid + 1] - y[mid]) / (x[mid + 1] - x[mid]) * (xp - x[mid]);
} else {
// x is decreasing
if (xp >= x[0])
return y[0];
else if (xp <= x[n-1])
return y[n-1];
low = 0;
high = n - 1;
mid = (low + high) >> 1;
while (low < high - 1) {
if (x[mid] <= xp)
low = mid;
else
high = mid;
mid = (low + high) >> 1;
}
//assert((xp <= x[mid]) && (xp >= x[mid + 1]));
return y[mid] +
(y[mid +1] - y[mid]) / (x[mid + 1] - x[mid]) * (xp - x[mid]);
}
}
__device__
bool init(int beam, int pre_raynum, double &x_init, double &y_init, double &z_init,
double &uray_init, const double *beam_norm, double *pow_r, double *phase_r) {
int zones_spanned = ceil((beam_max_x-beam_min_x)/xres);
int b1 = pre_raynum/(rays_per_zone*rays_per_zone);
int b2 = pre_raynum%(rays_per_zone*rays_per_zone);
int ry = b1/(zones_spanned)*rays_per_zone + b2/rays_per_zone;
int rx = b1%(zones_spanned)*rays_per_zone + b2%rays_per_zone;
int raynum = ry*nrays_x+rx;
x_init = beam_min_x;
for (int i = 0; i < (raynum % nrays_x); i++) {
x_init += (beam_max_x - beam_min_x) / (nrays_x - 1);
}
// ab: this should be faster but in order to agree with CPU I do it the way above
//x_init = (raynum % nrays_x) * (beam_max_x - beam_min_x) / (nrays_x - 1) + beam_min_x;
x_init += dx/2;
y_init = beam_min_x;
for (int i = 0; i < (raynum / nrays_x); i++) {
y_init += (beam_max_x - beam_min_x) / (nrays_y - 1);
}
// ab: same here
//y_init = (raynum / nrays_y) * (beam_max_x - beam_min_x) / (nrays_y - 1) + beam_min_x;
y_init += dy/2;
double ref = sqrt(square(x_init) + square(y_init));
//if (ref > beam_max_x) return false;
z_init = focal_length-dz/2;
double theta1 = acos(beam_norm[beam*3+2]);
double theta2 = atan2(beam_norm[beam*3+1]*focal_length, focal_length*beam_norm[beam*3+0]);
double tmp_x = x_init;
// ab: this might cause minor differences due to differences in CPU/GPU cos
x_init = x_init*cos(theta1) + z_init*sin(theta1); // first rotation
z_init = z_init*cos(theta1) - tmp_x*sin(theta1);
double tmp_x0 = x_init;
x_init = x_init*cos(theta2) - y_init*sin(theta2); // second rotation
y_init = y_init*cos(theta2) + tmp_x0*sin(theta2);
uray_init = uray_mult*interp_cuda(pow_r, phase_r, ref, 2001);
return ref <= beam_max_x;
}
__global__
void launch_ray_XYZ(int b, unsigned nindices, double *te_data_g,
double *r_data_g, double *ne_data_g, double *edep, double *bbeam_norm,
double *beam_norm, double *pow_r, double *phase_r,
double xconst, double yconst, double zconst) {
int beam = blockIdx.x + b*(nbeams/nGPUs);
int start = blockIdx.y*blockDim.x + threadIdx.x;
int search_index_x = 1, search_index_y = 1, search_index_z = 1,
thisx_m, thisx_p, thisy_m, thisy_p, thisz_m, thisz_p;
double dl, dm, dn, a1, a2, a3, a4, a5, a6, a7, a8, increment;
double xtemp, ytemp, ztemp;
double half = 0.5001;
double myx, myy, myz, myvx, myvy, myvz, uray, uray_init;
int thisx, thisy, thisz;
__shared__ double ne_data[nr];
__shared__ double r_data[nr];
__shared__ double te_data[nr];
__syncthreads();
int rindices = ceil(nr/(float)threads_per_block);
for (int i = 0; i < rindices; ++i) {
int rindex = threadIdx.x + i*threads_per_block;
if (rindex < nr) {
ne_data[rindex] = ne_data_g[rindex];
r_data[rindex] = r_data_g[rindex];
te_data[rindex] = te_data_g[rindex];
}
}
__syncthreads();
int nthreads = min(max_threads, nrays*nbeams);
int threads_per_beam = nthreads/nbeams;
for (int r = 0; r < nindices; ++r) {
int raynum = start + threads_per_beam*r;
// raynum increases wrt to r so once this condition holds we are done
if (raynum >= nrays) return;
bool t = init(beam, raynum, myx, myy, myz, uray, beam_norm, pow_r, phase_r);
uray_init = uray;
if (t) {
thisx = 0, thisy = 0, thisz = 0;
for (int xx = 0; xx < nx; ++xx) {
if (abs(xx*dx+xmin - myx) <= 0.5001 * dx) {
thisx = xx;
break; // "breaks" out of the xx loop once the if statement condition is met.
}
}
for (int yy = 0; yy < ny; ++yy) {
if (abs(yy*dy+ymin - myy) <= 0.5001 * dy) {
thisy = yy;
break; // "breaks" out of the yy loop once the if statement condition is met.
}
}
for (int zz = 0; zz < nz; ++zz) {
if (abs(zz*dz+zmin - myz) <= 0.5001 * dz) {
thisz = zz;
break; // "breaks" out of the zz loop once the if statement condition is met.
}
}
} else {
continue;
}
// Calculate the total k (=sqrt(kx^2+kz^2)) from the dispersion relation,
// taking into account the local plasma frequency of where the ray starts.
double wtmp = sqrt(square(thisx*dx+xmin) + square(thisy*dy+ymin) + square(thisz*dz+zmin));
wtmp = interp_cuda(ne_data, r_data, wtmp, nr);
double w = sqrt((square(omega) - wtmp*1e6*square(ec)/((double)me*e0)) / square(c));
// Set the initial unnormalized k vectors, which give the initial direction
// of the launched ray.
// For example, kx = kz = 1 would give a 45 degree angle in the +x / +z direction.
// For example, kx = 0 (or kz = 0) would give pure kz (or kx) propagation.
myvx = -1 * beam_norm[beam*3+0];
myvy = -1 * beam_norm[beam*3+1];
myvz = -1 * beam_norm[beam*3+2];
// Length of k for the ray to be launched
double knorm = sqrt(square(myvx) + square(myvy) + square(myvz));
myvx = square(c) * ((myvx / knorm) * w) / omega;
myvy = square(c) * ((myvy / knorm) * w) / omega;
myvz = square(c) * ((myvz / knorm) * w) / omega;
// Time step loop
for (int tt = 0; tt < nt; ++tt) {
// The next ray position depends upon the discrete gradient
// of a 3D array representing electron density
// In order to avoid global memory accesses we compute both
// the electron density and its gradient here
thisx_m = thisx - search_index_x;
thisx_p = thisx + search_index_x;
thisy_m = thisy - search_index_y;
thisy_p = thisy + search_index_y;
thisz_m = thisz - search_index_z;
thisz_p = thisz + search_index_z;
if (thisx == 0) {
thisx_p = 2;
thisx_m = 0;
} else if (thisx == nx-1) {
thisx_p = nx-1;
thisx_m = nx-3;
}
if (thisy == 0) {
thisy_p = 2;
thisy_m = 0;
} else if (thisy == ny-1) {
thisy_p = ny-1;
thisy_m = ny-3;
}
if (thisz == 0) {
thisz_p = 2;
thisz_m = 0;
} else if (thisz == nz-1) {
thisz_p = nz-1;
thisz_m = nz-3;
}
// Convert from coordinates in the grid
// to coordinates in space and pow them
double thisxp = thisx_p*dx+xmin;
double thisxm = thisx_m*dx+xmin;
double thisxd = thisx*dx+xmin;
double thisyp = thisy_p*dy+ymin;
double thisym = thisy_m*dy+ymin;
double thisyd = thisy*dy+ymin;
double thiszp = thisz_p*dz+zmin;
double thiszm = thisz_m*dz+zmin;
double thiszd = thisz*dz+zmin;
// Compute the electron density at each of the six directly
// adjacent nodes
double eden_x_p = interp_cuda(ne_data, r_data,
sqrt(thisxp*thisxp + thisyd*thisyd + thiszd*thiszd),nr);
double eden_x_m = interp_cuda(ne_data, r_data,
sqrt(thisxm*thisxm + thisyd*thisyd + thiszd*thiszd),nr);
double eden_y_p = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyp*thisyp + thiszd*thiszd),nr);
double eden_y_m = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisym*thisym + thiszd*thiszd),nr);
double eden_z_p = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyd*thisyd + thiszp*thiszp),nr);
double eden_z_m = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyd*thisyd + thiszm*thiszm),nr);
// Update ray position and velocity vectors
myvx -= xconst * (eden_x_p - eden_x_m);
myvy -= yconst * (eden_y_p - eden_y_m);
myvz -= zconst * (eden_z_p - eden_z_m);
myx += myvx * dt;
myy += myvy * dt;
myz += myvz * dt;
// Helper values to simplify the following computations
xtemp = (myx - xmin)*(1/dx);
ytemp = (myy - ymin)*(1/dy);
ztemp = (myz - zmin)*(1/dz);
// Determines current x index for the position
// These loops count down to be consistent with the C++ code
for (int xx = min(nx-1,thisx+1); xx >= max(0,thisx-1); --xx) {
thisx = (abs(xx-xtemp) < half) ? xx : thisx;
}
// Determines current y index for the position
for (int yy = min(ny-1,thisy+1); yy >= max(0,thisy-1); --yy) {
thisy = (abs(yy-ytemp) < half) ? yy : thisy;
}
// Determines current z index for the position
for (int zz = min(nz-1,thisz+1); zz >= max(0, thisz-1); --zz) {
thisz = (abs(zz-ztemp) < half) ? zz : thisz;
}
// In order to calculate the deposited energy into the plasma,
// we need to calculate the plasma resistivity (eta) and collision frequency (nu_e-i)
double tmp = sqrt(square(thisx*dx+xmin) + square(thisy*dy+ymin) + square(thisz*dz+zmin));
double ed = interp_cuda(ne_data, r_data, tmp, nr);
double etemp = interp_cuda(te_data, r_data, tmp, nr);
double eta = 5.2e-5 * 10.0 / (etemp*sqrt(etemp));
double nuei = (1e6 * ed * square(ec)/me)*eta;
if (absorption == 1) {
// Now we can decrement the ray's energy density according to how much energy
// was absorbed by the plasma.
increment = ed/ncrit * nuei * dt * uray;
uray -= increment;
} else {
// We use this next line instead, if we are just using uray as a bookkeeping device
// (i.e., no absorption by the plasma and no loss of energy by the ray).
increment = uray;
}
// Rather than put all the energy into the cell in which the ray resides, which
// is the so-called "nearest-neighbor" approach (which is very noisy and less accurate),
// we will use an area-based linear weighting scheme to deposit the energy to the
// eight nearest nodes of the ray's current location.
// Define xp, yp and zp to be the ray's position relative to the nearest node.
double xp = xtemp-thisx-0.5;
double yp = ytemp-thisy-0.5;
double zp = ztemp-thisz-0.5;
// Below, we interpolate the energy deposition to the grid using linear area weighting.
// The edep array must be two larger in each direction (one for min, one for max)
// to accommodate this routine, since it deposits energy in adjacent cells.
dm = 1.0 - abs(xp);
dn = 1.0 - abs(yp);
dl = 1.0 - abs(zp);
a1 = (1.0-dl)*(1.0-dn)*(1.0-dm);
a2 = (1.0-dl)*(1.0-dn)*dm;
a3 = dl*(1.0-dn)*(1.0-dm);
a4 = dl*(1.0-dn)*dm;
a5 = (1.0-dl)*dn*(1.0-dm);
a6 = (1.0-dl)*dn*dm;
a7 = dl*dn*(1.0-dm);
a8 = dl*dn*dm;
int signx = ((xp < 0) ? -1 : 1), signy = ((yp < 0) ? -1 : 1),
signz = ((zp < 0) ? -1 : 1);
atomicAdd(&edep[edep_index(thisx+1, thisy+1, thisz+1)], a1*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1, thisz+1)], a2*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1, thisz+1+signz)], a3*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1, thisz+1+signz)], a4*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1+signy, thisz+1)], a5*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1+signy, thisz+1)], a6*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1+signy, thisz+1+signz)], a7*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1+signy, thisz+1+signz)], a8*increment);
// This will cause the code to stop following the ray once it escapes the extent of the plasma
if (uray <= 0.05 * uray_init ||
myx < (xmin - (dx / 2.0)) || myx > (xmax + (dx / 2.0)) ||
myy < (ymin - (dy / 2.0)) || myy > (ymax + (dy / 2.0)) ||
myz < (zmin - (dz / 2.0)) || myz > (zmax + (dz / 2.0))) {
break;
}
}
}
}
| 9b6044fc5a712be33c6b5e4b0a7da422b5495d27.cu | #include <iostream>
#include "def.cuh"
#include <cuda_runtime.h>
__device__ long edep_index(long x, long y, long z) {
return x*(ny+2)*(nz+2) + y*(nz+2) + z;
}
__device__ double square(double x){
return x*x;
}
// Piecewise linear interpolation
// Use binary search to find the segment
// Ref: https://software.llnl.gov/yorick-doc/qref/qrfunc09.html
__device__ double interp_cuda(double *y, double *x, const double xp, int n)
{
unsigned low, high, mid;
if (x[0] <= x[n-1]) {
// x is increasing
if (xp <= x[0])
return y[0];
else if (xp >= x[n-1])
return y[n-1];
low = 0;
high = n - 1;
mid = (low + high) >> 1;
while (low < high - 1) {
if (x[mid] >= xp)
high = mid;
else
low = mid;
mid = (low + high) >> 1;
}
//assert((xp >= x[mid]) && (xp <= x[mid + 1]));
return y[mid] +
(y[mid + 1] - y[mid]) / (x[mid + 1] - x[mid]) * (xp - x[mid]);
} else {
// x is decreasing
if (xp >= x[0])
return y[0];
else if (xp <= x[n-1])
return y[n-1];
low = 0;
high = n - 1;
mid = (low + high) >> 1;
while (low < high - 1) {
if (x[mid] <= xp)
low = mid;
else
high = mid;
mid = (low + high) >> 1;
}
//assert((xp <= x[mid]) && (xp >= x[mid + 1]));
return y[mid] +
(y[mid +1] - y[mid]) / (x[mid + 1] - x[mid]) * (xp - x[mid]);
}
}
__device__
bool init(int beam, int pre_raynum, double &x_init, double &y_init, double &z_init,
double &uray_init, const double *beam_norm, double *pow_r, double *phase_r) {
int zones_spanned = ceil((beam_max_x-beam_min_x)/xres);
int b1 = pre_raynum/(rays_per_zone*rays_per_zone);
int b2 = pre_raynum%(rays_per_zone*rays_per_zone);
int ry = b1/(zones_spanned)*rays_per_zone + b2/rays_per_zone;
int rx = b1%(zones_spanned)*rays_per_zone + b2%rays_per_zone;
int raynum = ry*nrays_x+rx;
x_init = beam_min_x;
for (int i = 0; i < (raynum % nrays_x); i++) {
x_init += (beam_max_x - beam_min_x) / (nrays_x - 1);
}
// ab: this should be faster but in order to agree with CPU I do it the way above
//x_init = (raynum % nrays_x) * (beam_max_x - beam_min_x) / (nrays_x - 1) + beam_min_x;
x_init += dx/2;
y_init = beam_min_x;
for (int i = 0; i < (raynum / nrays_x); i++) {
y_init += (beam_max_x - beam_min_x) / (nrays_y - 1);
}
// ab: same here
//y_init = (raynum / nrays_y) * (beam_max_x - beam_min_x) / (nrays_y - 1) + beam_min_x;
y_init += dy/2;
double ref = sqrt(square(x_init) + square(y_init));
//if (ref > beam_max_x) return false;
z_init = focal_length-dz/2;
double theta1 = acos(beam_norm[beam*3+2]);
double theta2 = atan2(beam_norm[beam*3+1]*focal_length, focal_length*beam_norm[beam*3+0]);
double tmp_x = x_init;
// ab: this might cause minor differences due to differences in CPU/GPU cos
x_init = x_init*cos(theta1) + z_init*sin(theta1); // first rotation
z_init = z_init*cos(theta1) - tmp_x*sin(theta1);
double tmp_x0 = x_init;
x_init = x_init*cos(theta2) - y_init*sin(theta2); // second rotation
y_init = y_init*cos(theta2) + tmp_x0*sin(theta2);
uray_init = uray_mult*interp_cuda(pow_r, phase_r, ref, 2001);
return ref <= beam_max_x;
}
__global__
void launch_ray_XYZ(int b, unsigned nindices, double *te_data_g,
double *r_data_g, double *ne_data_g, double *edep, double *bbeam_norm,
double *beam_norm, double *pow_r, double *phase_r,
double xconst, double yconst, double zconst) {
int beam = blockIdx.x + b*(nbeams/nGPUs);
int start = blockIdx.y*blockDim.x + threadIdx.x;
int search_index_x = 1, search_index_y = 1, search_index_z = 1,
thisx_m, thisx_p, thisy_m, thisy_p, thisz_m, thisz_p;
double dl, dm, dn, a1, a2, a3, a4, a5, a6, a7, a8, increment;
double xtemp, ytemp, ztemp;
double half = 0.5001;
double myx, myy, myz, myvx, myvy, myvz, uray, uray_init;
int thisx, thisy, thisz;
__shared__ double ne_data[nr];
__shared__ double r_data[nr];
__shared__ double te_data[nr];
__syncthreads();
int rindices = ceil(nr/(float)threads_per_block);
for (int i = 0; i < rindices; ++i) {
int rindex = threadIdx.x + i*threads_per_block;
if (rindex < nr) {
ne_data[rindex] = ne_data_g[rindex];
r_data[rindex] = r_data_g[rindex];
te_data[rindex] = te_data_g[rindex];
}
}
__syncthreads();
int nthreads = min(max_threads, nrays*nbeams);
int threads_per_beam = nthreads/nbeams;
for (int r = 0; r < nindices; ++r) {
int raynum = start + threads_per_beam*r;
// raynum increases wrt to r so once this condition holds we are done
if (raynum >= nrays) return;
bool t = init(beam, raynum, myx, myy, myz, uray, beam_norm, pow_r, phase_r);
uray_init = uray;
if (t) {
thisx = 0, thisy = 0, thisz = 0;
for (int xx = 0; xx < nx; ++xx) {
if (abs(xx*dx+xmin - myx) <= 0.5001 * dx) {
thisx = xx;
break; // "breaks" out of the xx loop once the if statement condition is met.
}
}
for (int yy = 0; yy < ny; ++yy) {
if (abs(yy*dy+ymin - myy) <= 0.5001 * dy) {
thisy = yy;
break; // "breaks" out of the yy loop once the if statement condition is met.
}
}
for (int zz = 0; zz < nz; ++zz) {
if (abs(zz*dz+zmin - myz) <= 0.5001 * dz) {
thisz = zz;
break; // "breaks" out of the zz loop once the if statement condition is met.
}
}
} else {
continue;
}
// Calculate the total k (=sqrt(kx^2+kz^2)) from the dispersion relation,
// taking into account the local plasma frequency of where the ray starts.
double wtmp = sqrt(square(thisx*dx+xmin) + square(thisy*dy+ymin) + square(thisz*dz+zmin));
wtmp = interp_cuda(ne_data, r_data, wtmp, nr);
double w = sqrt((square(omega) - wtmp*1e6*square(ec)/((double)me*e0)) / square(c));
// Set the initial unnormalized k vectors, which give the initial direction
// of the launched ray.
// For example, kx = kz = 1 would give a 45 degree angle in the +x / +z direction.
// For example, kx = 0 (or kz = 0) would give pure kz (or kx) propagation.
myvx = -1 * beam_norm[beam*3+0];
myvy = -1 * beam_norm[beam*3+1];
myvz = -1 * beam_norm[beam*3+2];
// Length of k for the ray to be launched
double knorm = sqrt(square(myvx) + square(myvy) + square(myvz));
myvx = square(c) * ((myvx / knorm) * w) / omega;
myvy = square(c) * ((myvy / knorm) * w) / omega;
myvz = square(c) * ((myvz / knorm) * w) / omega;
// Time step loop
for (int tt = 0; tt < nt; ++tt) {
// The next ray position depends upon the discrete gradient
// of a 3D array representing electron density
// In order to avoid global memory accesses we compute both
// the electron density and its gradient here
thisx_m = thisx - search_index_x;
thisx_p = thisx + search_index_x;
thisy_m = thisy - search_index_y;
thisy_p = thisy + search_index_y;
thisz_m = thisz - search_index_z;
thisz_p = thisz + search_index_z;
if (thisx == 0) {
thisx_p = 2;
thisx_m = 0;
} else if (thisx == nx-1) {
thisx_p = nx-1;
thisx_m = nx-3;
}
if (thisy == 0) {
thisy_p = 2;
thisy_m = 0;
} else if (thisy == ny-1) {
thisy_p = ny-1;
thisy_m = ny-3;
}
if (thisz == 0) {
thisz_p = 2;
thisz_m = 0;
} else if (thisz == nz-1) {
thisz_p = nz-1;
thisz_m = nz-3;
}
// Convert from coordinates in the grid
// to coordinates in space and pow them
double thisxp = thisx_p*dx+xmin;
double thisxm = thisx_m*dx+xmin;
double thisxd = thisx*dx+xmin;
double thisyp = thisy_p*dy+ymin;
double thisym = thisy_m*dy+ymin;
double thisyd = thisy*dy+ymin;
double thiszp = thisz_p*dz+zmin;
double thiszm = thisz_m*dz+zmin;
double thiszd = thisz*dz+zmin;
// Compute the electron density at each of the six directly
// adjacent nodes
double eden_x_p = interp_cuda(ne_data, r_data,
sqrt(thisxp*thisxp + thisyd*thisyd + thiszd*thiszd),nr);
double eden_x_m = interp_cuda(ne_data, r_data,
sqrt(thisxm*thisxm + thisyd*thisyd + thiszd*thiszd),nr);
double eden_y_p = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyp*thisyp + thiszd*thiszd),nr);
double eden_y_m = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisym*thisym + thiszd*thiszd),nr);
double eden_z_p = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyd*thisyd + thiszp*thiszp),nr);
double eden_z_m = interp_cuda(ne_data, r_data,
sqrt(thisxd*thisxd + thisyd*thisyd + thiszm*thiszm),nr);
// Update ray position and velocity vectors
myvx -= xconst * (eden_x_p - eden_x_m);
myvy -= yconst * (eden_y_p - eden_y_m);
myvz -= zconst * (eden_z_p - eden_z_m);
myx += myvx * dt;
myy += myvy * dt;
myz += myvz * dt;
// Helper values to simplify the following computations
xtemp = (myx - xmin)*(1/dx);
ytemp = (myy - ymin)*(1/dy);
ztemp = (myz - zmin)*(1/dz);
// Determines current x index for the position
// These loops count down to be consistent with the C++ code
for (int xx = min(nx-1,thisx+1); xx >= max(0,thisx-1); --xx) {
thisx = (abs(xx-xtemp) < half) ? xx : thisx;
}
// Determines current y index for the position
for (int yy = min(ny-1,thisy+1); yy >= max(0,thisy-1); --yy) {
thisy = (abs(yy-ytemp) < half) ? yy : thisy;
}
// Determines current z index for the position
for (int zz = min(nz-1,thisz+1); zz >= max(0, thisz-1); --zz) {
thisz = (abs(zz-ztemp) < half) ? zz : thisz;
}
// In order to calculate the deposited energy into the plasma,
// we need to calculate the plasma resistivity (eta) and collision frequency (nu_e-i)
double tmp = sqrt(square(thisx*dx+xmin) + square(thisy*dy+ymin) + square(thisz*dz+zmin));
double ed = interp_cuda(ne_data, r_data, tmp, nr);
double etemp = interp_cuda(te_data, r_data, tmp, nr);
double eta = 5.2e-5 * 10.0 / (etemp*sqrt(etemp));
double nuei = (1e6 * ed * square(ec)/me)*eta;
if (absorption == 1) {
// Now we can decrement the ray's energy density according to how much energy
// was absorbed by the plasma.
increment = ed/ncrit * nuei * dt * uray;
uray -= increment;
} else {
// We use this next line instead, if we are just using uray as a bookkeeping device
// (i.e., no absorption by the plasma and no loss of energy by the ray).
increment = uray;
}
// Rather than put all the energy into the cell in which the ray resides, which
      // is the so-called "nearest-neighbor" approach (which is very noisy and less accurate),
// we will use an area-based linear weighting scheme to deposit the energy to the
// eight nearest nodes of the ray's current location.
// Define xp, yp and zp to be the ray's position relative to the nearest node.
double xp = xtemp-thisx-0.5;
double yp = ytemp-thisy-0.5;
double zp = ztemp-thisz-0.5;
// Below, we interpolate the energy deposition to the grid using linear area weighting.
// The edep array must be two larger in each direction (one for min, one for max)
      // to accommodate this routine, since it deposits energy in adjacent cells.
dm = 1.0 - abs(xp);
dn = 1.0 - abs(yp);
dl = 1.0 - abs(zp);
a1 = (1.0-dl)*(1.0-dn)*(1.0-dm);
a2 = (1.0-dl)*(1.0-dn)*dm;
a3 = dl*(1.0-dn)*(1.0-dm);
a4 = dl*(1.0-dn)*dm;
a5 = (1.0-dl)*dn*(1.0-dm);
a6 = (1.0-dl)*dn*dm;
a7 = dl*dn*(1.0-dm);
a8 = dl*dn*dm;
int signx = ((xp < 0) ? -1 : 1), signy = ((yp < 0) ? -1 : 1),
signz = ((zp < 0) ? -1 : 1);
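      // Deposit into the eight surrounding nodes with atomicAdd, since rays traced by
      // different threads can write into the same cells.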
atomicAdd(&edep[edep_index(thisx+1, thisy+1, thisz+1)], a1*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1, thisz+1)], a2*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1, thisz+1+signz)], a3*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1, thisz+1+signz)], a4*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1+signy, thisz+1)], a5*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1+signy, thisz+1)], a6*increment);
atomicAdd(&edep[edep_index(thisx+1, thisy+1+signy, thisz+1+signz)], a7*increment);
atomicAdd(&edep[edep_index(thisx+1+signx, thisy+1+signy, thisz+1+signz)], a8*increment);
// This will cause the code to stop following the ray once it escapes the extent of the plasma
if (uray <= 0.05 * uray_init ||
myx < (xmin - (dx / 2.0)) || myx > (xmax + (dx / 2.0)) ||
myy < (ymin - (dy / 2.0)) || myy > (ymax + (dy / 2.0)) ||
myz < (zmin - (dz / 2.0)) || myz > (zmax + (dz / 2.0))) {
break;
}
}
}
}
|
e324c4d18338fd7f6a1a275cbca2ae013012c9e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "libaxb.h"
#include "libaxb/backend.h"
#include "libaxb/general.h"
#include "libaxb/backend/op.h"
#include <hip/hip_runtime.h>
/////////////////////
__global__
static void kernel_vec_set_from_device(int n, double *x, const double *alpha)
{
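  // Grid-stride loop: the fixed 256-block x 256-thread launches used by the host wrappers
  // cover vectors of arbitrary length.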
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = *alpha;
}
__global__
static void kernel_vec_set_from_host(int n, double *x, double alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = alpha;
}
static axbStatus_t op_vec_set(struct axbVec_s *x, const struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
if (strcmp(alpha->memBackend->name, "host") != 0) { // alpha on GPU
const double *d_alpha = (const double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_device), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
} else { // alpha on CPU
double d_alpha = *((const double*)alpha->data);
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
}
return 0;
}
/////////////////////
__global__
static void kernel_vec_sqrtabs(int n, double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = sqrt(fabs(x[i]));
}
static axbStatus_t op_vec_sqrtabs(struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
hipLaunchKernelGGL(( kernel_vec_sqrtabs), dim3(256), dim3(256), 0, 0, (int)x->size, d_x);
return 0;
}
/////////////////////
static axbStatus_t op_vec_zero(struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, 0);
return 0;
}
/////////////////////
__global__
static void kernel_vec_scale_from_device(int n, double *x, const double *alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] *= *alpha;
}
__global__
static void kernel_vec_scale_from_host(int n, double *x, double alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] *= alpha;
}
static axbStatus_t op_vec_scale(struct axbVec_s *x, const struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
if (strcmp(alpha->memBackend->name, "host") != 0) { // alpha on GPU
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_scale_from_device), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
} else { // alpha on CPU
double d_alpha = *((double*)alpha->data);
hipLaunchKernelGGL(( kernel_vec_scale_from_host), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
}
return 0;
}
//
// Reduction operations
//
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
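// Devices with compute capability below 6.0 provide no native double-precision atomicAdd,
// so it is emulated here with an atomicCAS loop.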
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/////////////////////
__global__
static void kernel_vec_sum(int n, const double *x, double *alpha)
{
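  // Each thread accumulates a private partial sum, the block reduces those partials in
  // shared memory, and a single atomicAdd per block folds the block result into *alpha.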
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_sum(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_alpha, 0);
hipLaunchKernelGGL(( kernel_vec_sum), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_dot(int n, const double *x, const double *y, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i] * y[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_dot(const struct axbVec_s *x, const struct axbVec_s *y, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_alpha, 0);
hipLaunchKernelGGL(( kernel_vec_dot), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_y, d_alpha);
return 0;
}
/////////////////////
static axbStatus_t op_vec_tdot(const struct axbVec_s *x, const struct axbVec_s *y, struct axbScalar_s *alpha, void *aux_data)
{
return op_vec_dot(x, y, alpha, aux_data); // TODO: update for complex scalar types
}
/////////////////////
static axbStatus_t op_vec_mdot(const struct axbVec_s *x, size_t num_vecs, const struct axbVec_s **y, struct axbScalar_s **mdot, void *aux_data)
{
(void)aux_data;
// TODO: Replace by faster variant
for (size_t i=0; i<num_vecs; ++i)
op_vec_dot(x, y[i], mdot[i], aux_data);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norm1(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += fabs(x[i]);
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_norm1(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_alpha, 0);
hipLaunchKernelGGL(( kernel_vec_norm1), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norm2(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i] * x[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_norm2(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_alpha, 0);
hipLaunchKernelGGL(( kernel_vec_norm2), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_alpha);
hipLaunchKernelGGL(( kernel_vec_sqrtabs), dim3(1), dim3(1), 0, 0, (int)1, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norminf(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t = max(t, fabs(x[i]));
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] = max(reduction_buffer[threadIdx.x], reduction_buffer[threadIdx.x+stride]);
}
if (threadIdx.x == 0)
alpha[blockIdx.x] = reduction_buffer[0];
}
static axbStatus_t op_vec_norminf(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *tmp;
hipMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_norminf
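  // Two-stage reduction: the first launch writes one per-block maximum into tmp,
  // the second launch reduces those 256 partial maxima with a single block.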
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_alpha, 0);
hipLaunchKernelGGL(( kernel_vec_norminf), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, tmp);
hipLaunchKernelGGL(( kernel_vec_norminf), dim3(1), dim3(256), 0, 0, (int)256, tmp, d_alpha);
hipFree(tmp);
return 0;
}
/////////////////////
__global__
static void kernel_vec_dotnorm2(int n, const double *s, const double *t, double *dot_st, double *norm_t)
{
__shared__ double reduction_buffer[256];
double dot = 0;
double norm = 0;
  // The accumulators are zeroed by the host (via kernel_vec_set_from_host) before this kernel
  // is launched; zeroing them here from every thread would race with the atomicAdd calls below.
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double val_t = t[i];
dot += s[i] * val_t;
norm += val_t * val_t;
}
//
// first reduction for dot
//
reduction_buffer[threadIdx.x] = dot;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(dot_st, reduction_buffer[0]);
//
// second reduction for norm
//
reduction_buffer[threadIdx.x] = norm;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(norm_t, reduction_buffer[0]);
}
static axbStatus_t op_vec_dotnorm2(const struct axbVec_s *s, const struct axbVec_s *t, struct axbScalar_s *dot, struct axbScalar_s *norm, void *aux_data)
{
(void)aux_data;
const double *d_s = (const double*)s->data;
const double *d_t = (const double*)t->data;
double *d_dot = (double*)dot->data;
double *d_norm = (double*)norm->data;
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_dot, 0);
hipLaunchKernelGGL(( kernel_vec_set_from_host), dim3(1), dim3(1), 0, 0, (int)1, d_norm, 0);
hipLaunchKernelGGL(( kernel_vec_dotnorm2), dim3(256), dim3(256), 0, 0, (int)s->size, d_s, d_t, d_dot, d_norm);
hipLaunchKernelGGL(( kernel_vec_sqrtabs), dim3(1), dim3(1), 0, 0, (int)1, d_norm);
return 0;
}
/////////////////////
__global__
static void kernel_vec_max(int n, const double *x, int *index, double *alpha)
{
__shared__ double reduction_buffer_max[256];
__shared__ int reduction_buffer_idx[256];
double t = x[0];
int idx = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double xi = x[i];
if (t < xi) {
t = xi;
idx = i;
}
}
reduction_buffer_max[threadIdx.x] = t;
reduction_buffer_idx[threadIdx.x] = idx;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride) {
if (reduction_buffer_max[threadIdx.x] < reduction_buffer_max[threadIdx.x+stride]) {
reduction_buffer_max[threadIdx.x] = reduction_buffer_max[threadIdx.x+stride];
reduction_buffer_idx[threadIdx.x] = reduction_buffer_idx[threadIdx.x+stride];
}
}
}
if (threadIdx.x == 0) {
alpha[blockIdx.x] = reduction_buffer_max[0];
index[blockIdx.x] = reduction_buffer_idx[0];
}
}
static axbStatus_t op_vec_max(const struct axbVec_s *x, size_t *idx, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
//
// TODO: Refactor this! The whole computation can be done without host<->device copies!
//
double *tmp; hipMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_max
int *index; hipMalloc((void**)&index, sizeof(int) * 256); // TODO: Avoid allocation in each call to op_vec_max
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_max), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, index, tmp);
double host_val[256];
hipMemcpy(host_val, tmp, 256 * sizeof(double), hipMemcpyDeviceToHost);
int host_idx[256];
hipMemcpy(host_idx, index, 256 * sizeof(int), hipMemcpyDeviceToHost);
double val_max = host_val[0];
int idx_max = host_idx[0];
for (size_t i=1; i<256; ++i) {
if (val_max < host_val[i]) {
val_max = host_val[i];
idx_max = host_idx[i];
}
}
*idx = idx_max;
hipMemcpy(d_alpha, (void*)&val_max, sizeof(double), hipMemcpyHostToDevice);
hipFree(tmp);
hipFree(index);
return 0;
}
/////////////////////
__global__
static void kernel_vec_min(int n, const double *x, int *index, double *alpha)
{
__shared__ double reduction_buffer_min[256];
__shared__ int reduction_buffer_idx[256];
double t = x[0];
int idx = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double xi = x[i];
if (t > xi) {
t = xi;
idx = i;
}
}
reduction_buffer_min[threadIdx.x] = t;
reduction_buffer_idx[threadIdx.x] = idx;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride) {
if (reduction_buffer_min[threadIdx.x] > reduction_buffer_min[threadIdx.x+stride]) {
reduction_buffer_min[threadIdx.x] = reduction_buffer_min[threadIdx.x+stride];
reduction_buffer_idx[threadIdx.x] = reduction_buffer_idx[threadIdx.x+stride];
}
}
}
if (threadIdx.x == 0) {
alpha[blockIdx.x] = reduction_buffer_min[0];
index[blockIdx.x] = reduction_buffer_idx[0];
}
}
static axbStatus_t op_vec_min(const struct axbVec_s *x, size_t *idx, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
//
// TODO: Refactor this! The whole computation can be done without host<->device copies!
//
  double *tmp; hipMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_min
  int *index; hipMalloc((void**)&index, sizeof(int) * 256); // TODO: Avoid allocation in each call to op_vec_min
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
hipLaunchKernelGGL(( kernel_vec_min), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, index, tmp);
double host_val[256];
hipMemcpy(host_val, tmp, 256 * sizeof(double), hipMemcpyDeviceToHost);
int host_idx[256];
hipMemcpy(host_idx, index, 256 * sizeof(int), hipMemcpyDeviceToHost);
double val_min = host_val[0];
int idx_min = host_idx[0];
for (size_t i=1; i<256; ++i) {
if (val_min > host_val[i]) {
val_min = host_val[i];
idx_min = host_idx[i];
}
}
*idx = idx_min;
hipMemcpy(d_alpha, (void*)&val_min, sizeof(double), hipMemcpyHostToDevice);
hipFree(tmp);
hipFree(index);
return 0;
}
//
// Vector-vector operations
//
__global__
static void kernel_vec_copy(int n, const double *x, double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = x[i];
}
static axbStatus_t op_vec_copy(const struct axbVec_s *x, struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_y = (double*)y->data;
hipLaunchKernelGGL(( kernel_vec_copy), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_swap(int n, double *x, double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double t = y[i];
y[i] = x[i];
x[i] = t;
}
}
static axbStatus_t op_vec_swap(struct axbVec_s *x, struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
double *d_y = (double*)y->data;
hipLaunchKernelGGL(( kernel_vec_swap), dim3(256), dim3(256), 0, 0, (int)x->size, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_axpy(int n, double *y, const double *alpha, const double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = *alpha * x[i] + y[i];
}
static axbStatus_t op_vec_axpy(struct axbVec_s *y, const struct axbScalar_s *alpha, const struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_y = (double*)y->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
hipLaunchKernelGGL(( kernel_vec_axpy), dim3(256), dim3(256), 0, 0, (int)y->size, d_y, d_alpha, d_x);
return 0;
}
/////////////////////
__global__
static void kernel_vec_aypx(int n, double *y, const double *alpha, const double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = *alpha * y[i] + x[i];
}
static axbStatus_t op_vec_aypx(struct axbVec_s *y, const struct axbScalar_s *alpha, const struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_y = (double*)y->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
hipLaunchKernelGGL(( kernel_vec_aypx), dim3(256), dim3(256), 0, 0, (int)y->size, d_y, d_alpha, d_x);
return 0;
}
/////////////////////
__global__
static void kernel_vec_axpbypcz(int n, double *z, const double *alpha, const double *beta, const double *gamma, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) z[i] = *alpha * x[i] + *beta * y[i] + *gamma * z[i];
}
static axbStatus_t op_vec_axpbypcz(struct axbVec_s *z, const struct axbScalar_s *alpha, const struct axbScalar_s *beta, const struct axbScalar_s *gamma, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_z = (double*)z->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_beta = (const double*)beta->data;
const double *d_gamma = (const double*)gamma->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
hipLaunchKernelGGL(( kernel_vec_axpbypcz), dim3(256), dim3(256), 0, 0, (int)z->size, d_z, d_alpha, d_beta, d_gamma, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_waxpy(int n, double *w, const double *alpha, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = *alpha * x[i] + y[i];
}
static axbStatus_t op_vec_waxpy(struct axbVec_s *w, const struct axbScalar_s *alpha, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
hipLaunchKernelGGL(( kernel_vec_waxpy), dim3(256), dim3(256), 0, 0, (int)w->size, d_w, d_alpha, d_x, d_y);
return 0;
}
/////////////////////
static axbStatus_t op_vec_maxpy(struct axbVec_s *y, size_t num_vecs, const struct axbScalar_s **alpha, const struct axbVec_s **x, void *aux_data) {
// TODO: Be more efficient than this!
for (size_t i=0; i<num_vecs; ++i)
op_vec_axpy(y, alpha[i], x[i], aux_data);
return 0;
}
/////////////////////
__global__
static void kernel_vec_pointwisemult(int n, double *w, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = x[i] * y[i];
}
static axbStatus_t op_vec_pointwisemult(struct axbVec_s *w, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
hipLaunchKernelGGL(( kernel_vec_pointwisemult), dim3(256), dim3(256), 0, 0, (int)w->size, d_w, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_pointwisediv(int n, double *w, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = x[i] / y[i];
}
static axbStatus_t op_vec_pointwisediv(struct axbVec_s *w, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
hipLaunchKernelGGL(( kernel_vec_pointwisediv), dim3(256), dim3(256), 0, 0, (int)w->size, d_w, d_x, d_y);
return 0;
}
/////////////////////
extern "C" axbStatus_t axbOpBackendRegister_CUDA(struct axbHandle_s *handle)
{
struct axbOpBackend_s *cuda_backend;
axbStatus_t status = axbOpBackendCreate(&cuda_backend); AXB_ERRCHK(status);
  // populate cuda_backend:
status = axbOpBackendSetName(cuda_backend, "CUDA"); AXB_ERRCHK(status);
axbOperationID_t op_id = 0;
#define AXB_ADD_OPERATION(OPNAME, ENUMCONSTANT) status = axbOpBackendAddOperation(cuda_backend, #OPNAME, (axbStatus_t (*)(void))OPNAME, NULL, &op_id); AXB_ERRCHK(status); assert(op_id == ENUMCONSTANT && "Logic error: op_id != " #ENUMCONSTANT)
// inplace operations
AXB_ADD_OPERATION(op_vec_set, AXB_OP_VEC_SET);
AXB_ADD_OPERATION(op_vec_sqrtabs, AXB_OP_VEC_SQRTABS);
AXB_ADD_OPERATION(op_vec_zero, AXB_OP_VEC_ZERO);
AXB_ADD_OPERATION(op_vec_scale, AXB_OP_VEC_SCALE);
// reduction operations
AXB_ADD_OPERATION(op_vec_sum, AXB_OP_VEC_SUM);
AXB_ADD_OPERATION(op_vec_dot, AXB_OP_VEC_DOT);
AXB_ADD_OPERATION(op_vec_tdot, AXB_OP_VEC_TDOT);
AXB_ADD_OPERATION(op_vec_mdot, AXB_OP_VEC_MDOT);
AXB_ADD_OPERATION(op_vec_norm1, AXB_OP_VEC_NORM1);
AXB_ADD_OPERATION(op_vec_norm2, AXB_OP_VEC_NORM2);
AXB_ADD_OPERATION(op_vec_norminf, AXB_OP_VEC_NORMINF);
AXB_ADD_OPERATION(op_vec_dotnorm2, AXB_OP_VEC_DOTNORM2);
AXB_ADD_OPERATION(op_vec_max, AXB_OP_VEC_MAX);
AXB_ADD_OPERATION(op_vec_min, AXB_OP_VEC_MIN);
// vector-vector operations
AXB_ADD_OPERATION(op_vec_copy, AXB_OP_VEC_COPY);
AXB_ADD_OPERATION(op_vec_swap, AXB_OP_VEC_SWAP);
AXB_ADD_OPERATION(op_vec_axpy, AXB_OP_VEC_AXPY);
AXB_ADD_OPERATION(op_vec_aypx, AXB_OP_VEC_AYPX);
AXB_ADD_OPERATION(op_vec_axpbypcz, AXB_OP_VEC_AXPBYPCZ);
AXB_ADD_OPERATION(op_vec_waxpy, AXB_OP_VEC_WAXPY);
AXB_ADD_OPERATION(op_vec_maxpy, AXB_OP_VEC_MAXPY);
AXB_ADD_OPERATION(op_vec_pointwisemult, AXB_OP_VEC_POINTWISEMULT);
AXB_ADD_OPERATION(op_vec_pointwisediv, AXB_OP_VEC_POINTWISEDIV);
#undef AXB_ADD_OPERATION
// push into enclosing context identified by handle:
status = axbOpBackendRegister(handle, cuda_backend); AXB_ERRCHK(status);
return 0;
}
| e324c4d18338fd7f6a1a275cbca2ae013012c9e9.cu |
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "libaxb.h"
#include "libaxb/backend.h"
#include "libaxb/general.h"
#include "libaxb/backend/op.h"
#include <cuda_runtime.h>
/////////////////////
__global__
static void kernel_vec_set_from_device(int n, double *x, const double *alpha)
{
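  // Grid-stride loop: the fixed 256-block x 256-thread launches used by the host wrappers
  // cover vectors of arbitrary length.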
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = *alpha;
}
__global__
static void kernel_vec_set_from_host(int n, double *x, double alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = alpha;
}
static axbStatus_t op_vec_set(struct axbVec_s *x, const struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
if (strcmp(alpha->memBackend->name, "host") != 0) { // alpha on GPU
const double *d_alpha = (const double*)alpha->data;
kernel_vec_set_from_device<<<256, 256>>>((int)x->size, d_x, d_alpha);
} else { // alpha on CPU
double d_alpha = *((const double*)alpha->data);
kernel_vec_set_from_host<<<256, 256>>>((int)x->size, d_x, d_alpha);
}
return 0;
}
/////////////////////
__global__
static void kernel_vec_sqrtabs(int n, double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] = sqrt(fabs(x[i]));
}
static axbStatus_t op_vec_sqrtabs(struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
kernel_vec_sqrtabs<<<256, 256>>>((int)x->size, d_x);
return 0;
}
/////////////////////
static axbStatus_t op_vec_zero(struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
kernel_vec_set_from_host<<<256, 256>>>((int)x->size, d_x, 0);
return 0;
}
/////////////////////
__global__
static void kernel_vec_scale_from_device(int n, double *x, const double *alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] *= *alpha;
}
__global__
static void kernel_vec_scale_from_host(int n, double *x, double alpha)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) x[i] *= alpha;
}
static axbStatus_t op_vec_scale(struct axbVec_s *x, const struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
if (strcmp(alpha->memBackend->name, "host") != 0) { // alpha on GPU
double *d_alpha = (double*)alpha->data;
kernel_vec_scale_from_device<<<256, 256>>>((int)x->size, d_x, d_alpha);
} else { // alpha on CPU
double d_alpha = *((double*)alpha->data);
kernel_vec_scale_from_host<<<256, 256>>>((int)x->size, d_x, d_alpha);
}
return 0;
}
//
// Reduction operations
//
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
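// Devices with compute capability below 6.0 provide no native double-precision atomicAdd,
// so it is emulated here with an atomicCAS loop.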
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/////////////////////
__global__
static void kernel_vec_sum(int n, const double *x, double *alpha)
{
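  // Each thread accumulates a private partial sum, the block reduces those partials in
  // shared memory, and a single atomicAdd per block folds the block result into *alpha.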
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_sum(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_alpha, 0);
kernel_vec_sum<<<256, 256>>>((int)x->size, d_x, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_dot(int n, const double *x, const double *y, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i] * y[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_dot(const struct axbVec_s *x, const struct axbVec_s *y, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_alpha, 0);
kernel_vec_dot<<<256, 256>>>((int)x->size, d_x, d_y, d_alpha);
return 0;
}
/////////////////////
static axbStatus_t op_vec_tdot(const struct axbVec_s *x, const struct axbVec_s *y, struct axbScalar_s *alpha, void *aux_data)
{
return op_vec_dot(x, y, alpha, aux_data); // TODO: update for complex scalar types
}
/////////////////////
static axbStatus_t op_vec_mdot(const struct axbVec_s *x, size_t num_vecs, const struct axbVec_s **y, struct axbScalar_s **mdot, void *aux_data)
{
(void)aux_data;
// TODO: Replace by faster variant
for (size_t i=0; i<num_vecs; ++i)
op_vec_dot(x, y[i], mdot[i], aux_data);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norm1(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += fabs(x[i]);
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_norm1(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_alpha, 0);
kernel_vec_norm1<<<256, 256>>>((int)x->size, d_x, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norm2(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t += x[i] * x[i];
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(alpha, reduction_buffer[0]);
}
static axbStatus_t op_vec_norm2(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_alpha, 0);
kernel_vec_norm2<<<256, 256>>>((int)x->size, d_x, d_alpha);
kernel_vec_sqrtabs<<<1, 1>>>((int)1, d_alpha);
return 0;
}
/////////////////////
__global__
static void kernel_vec_norminf(int n, const double *x, double *alpha)
{
__shared__ double reduction_buffer[256];
double t = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) t = max(t, fabs(x[i]));
reduction_buffer[threadIdx.x] = t;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] = max(reduction_buffer[threadIdx.x], reduction_buffer[threadIdx.x+stride]);
}
if (threadIdx.x == 0)
alpha[blockIdx.x] = reduction_buffer[0];
}
static axbStatus_t op_vec_norminf(const struct axbVec_s *x, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
double *tmp;
cudaMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_norminf
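  // Two-stage reduction: the first launch writes one per-block maximum into tmp,
  // the second launch reduces those 256 partial maxima with a single block.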
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_alpha, 0);
kernel_vec_norminf<<<256, 256>>>((int)x->size, d_x, tmp);
kernel_vec_norminf<<<1, 256>>>((int)256, tmp, d_alpha);
cudaFree(tmp);
return 0;
}
/////////////////////
__global__
static void kernel_vec_dotnorm2(int n, const double *s, const double *t, double *dot_st, double *norm_t)
{
__shared__ double reduction_buffer[256];
double dot = 0;
double norm = 0;
  // The accumulators are zeroed by the host (via kernel_vec_set_from_host) before this kernel
  // is launched; zeroing them here from every thread would race with the atomicAdd calls below.
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double val_t = t[i];
dot += s[i] * val_t;
norm += val_t * val_t;
}
//
// first reduction for dot
//
reduction_buffer[threadIdx.x] = dot;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(dot_st, reduction_buffer[0]);
//
// second reduction for norm
//
reduction_buffer[threadIdx.x] = norm;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride)
reduction_buffer[threadIdx.x] += reduction_buffer[threadIdx.x+stride];
}
if (threadIdx.x == 0)
atomicAdd(norm_t, reduction_buffer[0]);
}
static axbStatus_t op_vec_dotnorm2(const struct axbVec_s *s, const struct axbVec_s *t, struct axbScalar_s *dot, struct axbScalar_s *norm, void *aux_data)
{
(void)aux_data;
const double *d_s = (const double*)s->data;
const double *d_t = (const double*)t->data;
double *d_dot = (double*)dot->data;
double *d_norm = (double*)norm->data;
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_dot, 0);
kernel_vec_set_from_host<<<1, 1>>>((int)1, d_norm, 0);
kernel_vec_dotnorm2<<<256, 256>>>((int)s->size, d_s, d_t, d_dot, d_norm);
kernel_vec_sqrtabs<<<1, 1>>>((int)1, d_norm);
return 0;
}
/////////////////////
__global__
static void kernel_vec_max(int n, const double *x, int *index, double *alpha)
{
__shared__ double reduction_buffer_max[256];
__shared__ int reduction_buffer_idx[256];
double t = x[0];
int idx = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double xi = x[i];
if (t < xi) {
t = xi;
idx = i;
}
}
reduction_buffer_max[threadIdx.x] = t;
reduction_buffer_idx[threadIdx.x] = idx;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride) {
if (reduction_buffer_max[threadIdx.x] < reduction_buffer_max[threadIdx.x+stride]) {
reduction_buffer_max[threadIdx.x] = reduction_buffer_max[threadIdx.x+stride];
reduction_buffer_idx[threadIdx.x] = reduction_buffer_idx[threadIdx.x+stride];
}
}
}
if (threadIdx.x == 0) {
alpha[blockIdx.x] = reduction_buffer_max[0];
index[blockIdx.x] = reduction_buffer_idx[0];
}
}
static axbStatus_t op_vec_max(const struct axbVec_s *x, size_t *idx, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
//
// TODO: Refactor this! The whole computation can be done without host<->device copies!
//
double *tmp; cudaMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_max
int *index; cudaMalloc((void**)&index, sizeof(int) * 256); // TODO: Avoid allocation in each call to op_vec_max
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_max<<<256, 256>>>((int)x->size, d_x, index, tmp);
double host_val[256];
cudaMemcpy(host_val, tmp, 256 * sizeof(double), cudaMemcpyDeviceToHost);
int host_idx[256];
cudaMemcpy(host_idx, index, 256 * sizeof(int), cudaMemcpyDeviceToHost);
double val_max = host_val[0];
int idx_max = host_idx[0];
for (size_t i=1; i<256; ++i) {
if (val_max < host_val[i]) {
val_max = host_val[i];
idx_max = host_idx[i];
}
}
*idx = idx_max;
cudaMemcpy(d_alpha, (void*)&val_max, sizeof(double), cudaMemcpyHostToDevice);
cudaFree(tmp);
cudaFree(index);
return 0;
}
/////////////////////
__global__
static void kernel_vec_min(int n, const double *x, int *index, double *alpha)
{
__shared__ double reduction_buffer_min[256];
__shared__ int reduction_buffer_idx[256];
double t = x[0];
int idx = 0;
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double xi = x[i];
if (t > xi) {
t = xi;
idx = i;
}
}
reduction_buffer_min[threadIdx.x] = t;
reduction_buffer_idx[threadIdx.x] = idx;
// parallel reduction
for (unsigned int stride = blockDim.x/2; stride > 0; stride /= 2)
{
__syncthreads();
if (threadIdx.x < stride) {
if (reduction_buffer_min[threadIdx.x] > reduction_buffer_min[threadIdx.x+stride]) {
reduction_buffer_min[threadIdx.x] = reduction_buffer_min[threadIdx.x+stride];
reduction_buffer_idx[threadIdx.x] = reduction_buffer_idx[threadIdx.x+stride];
}
}
}
if (threadIdx.x == 0) {
alpha[blockIdx.x] = reduction_buffer_min[0];
index[blockIdx.x] = reduction_buffer_idx[0];
}
}
static axbStatus_t op_vec_min(const struct axbVec_s *x, size_t *idx, struct axbScalar_s *alpha, void *aux_data)
{
(void)aux_data;
//
// TODO: Refactor this! The whole computation can be done without host<->device copies!
//
  double *tmp; cudaMalloc((void**)&tmp, sizeof(double) * 256); // TODO: Avoid allocation in each call to op_vec_min
  int *index; cudaMalloc((void**)&index, sizeof(int) * 256); // TODO: Avoid allocation in each call to op_vec_min
const double *d_x = (const double*)x->data;
double *d_alpha = (double*)alpha->data;
kernel_vec_min<<<256, 256>>>((int)x->size, d_x, index, tmp);
double host_val[256];
cudaMemcpy(host_val, tmp, 256 * sizeof(double), cudaMemcpyDeviceToHost);
int host_idx[256];
cudaMemcpy(host_idx, index, 256 * sizeof(int), cudaMemcpyDeviceToHost);
double val_min = host_val[0];
int idx_min = host_idx[0];
for (size_t i=1; i<256; ++i) {
if (val_min > host_val[i]) {
val_min = host_val[i];
idx_min = host_idx[i];
}
}
*idx = idx_min;
cudaMemcpy(d_alpha, (void*)&val_min, sizeof(double), cudaMemcpyHostToDevice);
cudaFree(tmp);
cudaFree(index);
return 0;
}
//
// Vector-vector operations
//
__global__
static void kernel_vec_copy(int n, const double *x, double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = x[i];
}
static axbStatus_t op_vec_copy(const struct axbVec_s *x, struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
const double *d_x = (const double*)x->data;
double *d_y = (double*)y->data;
kernel_vec_copy<<<256, 256>>>((int)x->size, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_swap(int n, double *x, double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) {
double t = y[i];
y[i] = x[i];
x[i] = t;
}
}
static axbStatus_t op_vec_swap(struct axbVec_s *x, struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_x = (double*)x->data;
double *d_y = (double*)y->data;
kernel_vec_swap<<<256, 256>>>((int)x->size, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_axpy(int n, double *y, const double *alpha, const double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = *alpha * x[i] + y[i];
}
static axbStatus_t op_vec_axpy(struct axbVec_s *y, const struct axbScalar_s *alpha, const struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_y = (double*)y->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
kernel_vec_axpy<<<256, 256>>>((int)y->size, d_y, d_alpha, d_x);
return 0;
}
/////////////////////
__global__
static void kernel_vec_aypx(int n, double *y, const double *alpha, const double *x)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) y[i] = *alpha * y[i] + x[i];
}
static axbStatus_t op_vec_aypx(struct axbVec_s *y, const struct axbScalar_s *alpha, const struct axbVec_s *x, void *aux_data)
{
(void)aux_data;
double *d_y = (double*)y->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
kernel_vec_aypx<<<256, 256>>>((int)y->size, d_y, d_alpha, d_x);
return 0;
}
/////////////////////
__global__
static void kernel_vec_axpbypcz(int n, double *z, const double *alpha, const double *beta, const double *gamma, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) z[i] = *alpha * x[i] + *beta * y[i] + *gamma * z[i];
}
static axbStatus_t op_vec_axpbypcz(struct axbVec_s *z, const struct axbScalar_s *alpha, const struct axbScalar_s *beta, const struct axbScalar_s *gamma, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_z = (double*)z->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_beta = (const double*)beta->data;
const double *d_gamma = (const double*)gamma->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
kernel_vec_axpbypcz<<<256, 256>>>((int)z->size, d_z, d_alpha, d_beta, d_gamma, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_waxpy(int n, double *w, const double *alpha, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = *alpha * x[i] + y[i];
}
static axbStatus_t op_vec_waxpy(struct axbVec_s *w, const struct axbScalar_s *alpha, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_alpha = (const double*)alpha->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
kernel_vec_waxpy<<<256, 256>>>((int)w->size, d_w, d_alpha, d_x, d_y);
return 0;
}
/////////////////////
static axbStatus_t op_vec_maxpy(struct axbVec_s *y, size_t num_vecs, const struct axbScalar_s **alpha, const struct axbVec_s **x, void *aux_data) {
// TODO: Be more efficient than this!
for (size_t i=0; i<num_vecs; ++i)
op_vec_axpy(y, alpha[i], x[i], aux_data);
return 0;
}
/////////////////////
__global__
static void kernel_vec_pointwisemult(int n, double *w, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = x[i] * y[i];
}
static axbStatus_t op_vec_pointwisemult(struct axbVec_s *w, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
kernel_vec_pointwisemult<<<256, 256>>>((int)w->size, d_w, d_x, d_y);
return 0;
}
/////////////////////
__global__
static void kernel_vec_pointwisediv(int n, double *w, const double *x, const double *y)
{
for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<n; i += gridDim.x * blockDim.x) w[i] = x[i] / y[i];
}
static axbStatus_t op_vec_pointwisediv(struct axbVec_s *w, const struct axbVec_s *x, const struct axbVec_s *y, void *aux_data)
{
(void)aux_data;
double *d_w = (double*)w->data;
const double *d_x = (const double*)x->data;
const double *d_y = (const double*)y->data;
kernel_vec_pointwisediv<<<256, 256>>>((int)w->size, d_w, d_x, d_y);
return 0;
}
/////////////////////
extern "C" axbStatus_t axbOpBackendRegister_CUDA(struct axbHandle_s *handle)
{
struct axbOpBackend_s *cuda_backend;
axbStatus_t status = axbOpBackendCreate(&cuda_backend); AXB_ERRCHK(status);
  // populate cuda_backend:
status = axbOpBackendSetName(cuda_backend, "CUDA"); AXB_ERRCHK(status);
axbOperationID_t op_id = 0;
#define AXB_ADD_OPERATION(OPNAME, ENUMCONSTANT) status = axbOpBackendAddOperation(cuda_backend, #OPNAME, (axbStatus_t (*)(void))OPNAME, NULL, &op_id); AXB_ERRCHK(status); assert(op_id == ENUMCONSTANT && "Logic error: op_id != " #ENUMCONSTANT)
// inplace operations
AXB_ADD_OPERATION(op_vec_set, AXB_OP_VEC_SET);
AXB_ADD_OPERATION(op_vec_sqrtabs, AXB_OP_VEC_SQRTABS);
AXB_ADD_OPERATION(op_vec_zero, AXB_OP_VEC_ZERO);
AXB_ADD_OPERATION(op_vec_scale, AXB_OP_VEC_SCALE);
// reduction operations
AXB_ADD_OPERATION(op_vec_sum, AXB_OP_VEC_SUM);
AXB_ADD_OPERATION(op_vec_dot, AXB_OP_VEC_DOT);
AXB_ADD_OPERATION(op_vec_tdot, AXB_OP_VEC_TDOT);
AXB_ADD_OPERATION(op_vec_mdot, AXB_OP_VEC_MDOT);
AXB_ADD_OPERATION(op_vec_norm1, AXB_OP_VEC_NORM1);
AXB_ADD_OPERATION(op_vec_norm2, AXB_OP_VEC_NORM2);
AXB_ADD_OPERATION(op_vec_norminf, AXB_OP_VEC_NORMINF);
AXB_ADD_OPERATION(op_vec_dotnorm2, AXB_OP_VEC_DOTNORM2);
AXB_ADD_OPERATION(op_vec_max, AXB_OP_VEC_MAX);
AXB_ADD_OPERATION(op_vec_min, AXB_OP_VEC_MIN);
// vector-vector operations
AXB_ADD_OPERATION(op_vec_copy, AXB_OP_VEC_COPY);
AXB_ADD_OPERATION(op_vec_swap, AXB_OP_VEC_SWAP);
AXB_ADD_OPERATION(op_vec_axpy, AXB_OP_VEC_AXPY);
AXB_ADD_OPERATION(op_vec_aypx, AXB_OP_VEC_AYPX);
AXB_ADD_OPERATION(op_vec_axpbypcz, AXB_OP_VEC_AXPBYPCZ);
AXB_ADD_OPERATION(op_vec_waxpy, AXB_OP_VEC_WAXPY);
AXB_ADD_OPERATION(op_vec_maxpy, AXB_OP_VEC_MAXPY);
AXB_ADD_OPERATION(op_vec_pointwisemult, AXB_OP_VEC_POINTWISEMULT);
AXB_ADD_OPERATION(op_vec_pointwisediv, AXB_OP_VEC_POINTWISEDIV);
#undef AXB_ADD_OPERATION
// push into enclosing context identified by handle:
status = axbOpBackendRegister(handle, cuda_backend); AXB_ERRCHK(status);
return 0;
}
|
1215b2dadf3c624e7a9529b46be4b5bd9df2609d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../include/RunKernel.h"
__global__ void simple_vbo_kernel(float3 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
pos[y*width + x] = make_float3(u, w, v);
}
__global__ void color_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float xx = x / (float)width;
float yy = y / (float)height;
float u = 0.5f+ 0.5f*cosf(time + xx + 0);
float v = 0.5f+ 0.5f*cosf(time + yy + 2);
float w = 0.5f + 0.5f*cosf(time + xx + 4);
// write output vertex
pos[y*width + x] = make_float4(u, v, w,1.0);
}
__global__ void grid_kernel(float3 * pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = pos[x + y * width].x;
float v = pos[x + y * width].z;
float freq = 4.0f;
pos[x + y * width].x = u;
pos[x + y * width].z = v;
pos[x + y * width].y = 0.5*(u*u-(v-time)*(v-time));
}
__global__ void gerstenerKernel(float3 * pos, unsigned int width, unsigned int height, float amplitude,float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x /float(width);
float v = (float)y / float(height);
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
u += 0.01f;
v += 0.01f;
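  // Superimpose two radial (circular) Gerstner wave components, one centred at the origin
  // and one at (0.45, 0.45).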
float dix1 = -(u) / (sqrt((u*u) +(v*v) ));
float diy1 = -(v) / sqrt((u*u)+(v*v));
float dix2 = (u - 0.45f) / (sqrt((u - 0.45f)*(u - 0.45f) + (v - 0.45f)*(v - 0.45f)));
float diy2 = (v-0.45f) / sqrt((u - 0.45f)*(u - 0.45f) + (v - 0.45f)*(v - 0.45f));
float wi = 0.7;
float q = 1.7;
float posx = u+ q*amplitude*dix1*cos((wi*((dix1)) +time)* 180 / 3.141592)+ q * amplitude*dix2*cos((wi*((dix2)) + time) * 180 / 3.141592);
float posz = v+ q*amplitude*diy1*cos((wi*((diy1)) + time)* 180 / 3.141592)+ q * amplitude*diy2*cos((wi*((diy2)) + time) * 180 / 3.141592);
float posy = amplitude*sin((wi*((dix1*u) + (diy1*v)) + time)*180 / 3.141592)+ amplitude * sin((wi*((dix2*u) + (diy2*v)) + time) * 180 / 3.141592);
pos[x + width * y] = make_float3(posx,posy,posz);
}
__global__ void multiWaveGerstenerKernelWithNormals(float3 * pos, float3 * norms ,unsigned int width, unsigned int height, WaveProp * prop, int n, float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x / float(width);
float v = (float)y / float(height);
u = 2.0f*u - 1.0f;
v = 2.0f*v - 1.0f;
u += 0.01f;
v += 0.01f;
float posx = u;
float posz = v;
float posy = 0;
float normx = 0;
float normy = 0;
float normz = 0;
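  // Accumulate the Gerstner displacement (posx/posy/posz) and the analytic surface normal
  // (normx/normy/normz) over all n waves.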
for (int i = 0; i < n; i++)
{
float dix = 0.0f, diy = 0.0f;
if (prop[i].isCircular == 0xFF)
{
float x = prop[i].x;
float y = prop[i].y;
x = u - x;
y = v - y;
dix = (x) / (sqrt(x*x + y * y));
      diy = (y) / (sqrt(x*x + y * y)); // normalize (dix, diy) by the same vector magnitude
}
else
{
dix = prop[i].dirx;
diy = prop[i].diry;
}
float amplitude = prop[i].amplitude;
float q = prop[i].q;
float wi = prop[i].w;
float phi = prop[i].phase;
posx += q * amplitude*dix*cos((wi*((dix)) + time * phi) * 180 / 3.141592);
posz += q * amplitude*diy*cos((wi*((diy)) + time * phi) * 180 / 3.141592);
posy += amplitude * sin((wi*((dix*u) + (diy*v)) + time * phi) * 180 / 3.141592);
normx += dix * wi * amplitude * sin((wi*(dix*u + diy * v) + time * phi) * 180 / 3.141592);
normz += diy * wi * amplitude * sin(((wi*(dix*u + diy * v)) + time * phi) * 180 / 3.141592);
normy += q * wi * amplitude * cos((wi*(dix*u + diy * v) + time * phi) * 180 / 3.141592);
}
  // Gerstner surface normal: negate the accumulated x/z slope sums and subtract the y sum from one.
  normx = -normx;
  normz = -normz;
  normy = 1 - normy;
pos[x + width * y] = make_float3(posx, posy, posz);
norms[x + width * y] = make_float3(normx, normy, normz);
}
__global__ void multiWaveGerstenerKernel(float3 * pos,unsigned int width, unsigned int height, WaveProp * prop,int n ,float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x / float(width);
float v = (float)y / float(height);
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
u += 0.01f;
v += 0.01f;
float posx = u;
float posz = v;
float posy = 0;
for (int i = 0; i < n; i++)
{
float dix=0.0f, diy=0.0f;
if (prop[i].isCircular == 0xFF)
{
float x = prop[i].x;
float y = prop[i].y;
x = u - x;
y = v - y;
dix = (x) / (sqrt(x*x + y*y));
      diy = (y) / (sqrt(x*x + y*y)); // normalize (dix, diy) by the same vector magnitude
}
else
{
dix = prop[i].dirx;
diy = prop[i].diry;
}
float amplitude = prop[i].amplitude;
float q = prop[i].q;
float wi = prop[i].w;
float phi = prop[i].phase;
posx += q * amplitude*dix*cos((wi*((dix)) + time*phi) * 180 / 3.141592);
posz += q * amplitude*diy*cos((wi*((diy)) + time*phi) * 180 / 3.141592);
posy += amplitude * sin((wi*((dix*u) + (diy*v)) + time*phi) * 180 / 3.141592);
}
pos[x + width * y] = make_float3(posx, posy, posz);
}
void UpdateMesh(float3 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
simple_vbo_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time);
}
void GerstnerTest(float3 * pos, unsigned int mesh_width, unsigned int mesh_height, float amplitude,float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
gerstenerKernel << < grid, block >> > (pos, mesh_width, mesh_height,amplitude, time);
}
void GerstnerNormalTest(float3 * pos, float3 * norms,WaveProp * prop, MeshProp mesh, int n, float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh.mesh_width / block.x, mesh.mesh_height / block.y, 1);
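  // Stage the wave descriptors in a temporary device buffer for this launch; the buffer is
  // freed once the kernel launch has been issued and checked.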
WaveProp * dev_ptr;
checkCudaErrors(hipMalloc(&dev_ptr, sizeof(WaveProp)*n));
checkCudaErrors(hipMemcpy(dev_ptr, prop, sizeof(WaveProp)*n, hipMemcpyHostToDevice));
multiWaveGerstenerKernelWithNormals << <grid, block >> > (pos, norms,mesh.mesh_width, mesh.mesh_height, dev_ptr, n, time);
getLastCudaError("Cuda Kernel Launch failed");
checkCudaErrors(hipFree(dev_ptr));
}
void GerstnerTest(float3 * pos, WaveProp * prop, MeshProp mesh,int n ,float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh.mesh_width / block.x, mesh.mesh_height / block.y, 1);
WaveProp * dev_ptr;
checkCudaErrors(hipMalloc(&dev_ptr,sizeof(WaveProp)*n));
checkCudaErrors(hipMemcpy(dev_ptr, prop, sizeof(WaveProp)*n, hipMemcpyHostToDevice));
multiWaveGerstenerKernel<< < grid, block >> > (pos, mesh.mesh_width, mesh.mesh_height, dev_ptr,n, time);
getLastCudaError("Cuda Kernel Launch failed");
checkCudaErrors(hipFree(dev_ptr));
}
void UpdateColors(float4 * pos, unsigned int width, unsigned int height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
color_kernel << < grid, block >> > (pos, width, height, time);
}
void UpdateGrid(float3 * pos, unsigned int width, unsigned int height, float time)
{
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
grid_kernel << <grid, block >> > (pos, width, height, time);
}
| 1215b2dadf3c624e7a9529b46be4b5bd9df2609d.cu | #include <cuda_runtime.h>
#include "../include/RunKernel.h"
__global__ void simple_vbo_kernel(float3 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
pos[y*width + x] = make_float3(u, w, v);
}
__global__ void color_kernel(float4 *pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float xx = x / (float)width;
float yy = y / (float)height;
float u = 0.5f+ 0.5f*cosf(time + xx + 0);
float v = 0.5f+ 0.5f*cosf(time + yy + 2);
float w = 0.5f + 0.5f*cosf(time + xx + 4);
// write output vertex
pos[y*width + x] = make_float4(u, v, w,1.0);
}
__global__ void grid_kernel(float3 * pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = pos[x + y * width].x;
float v = pos[x + y * width].z;
float freq = 4.0f;
pos[x + y * width].x = u;
pos[x + y * width].z = v;
pos[x + y * width].y = 0.5*(u*u-(v-time)*(v-time));
}
__global__ void gerstenerKernel(float3 * pos, unsigned int width, unsigned int height, float amplitude,float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x /float(width);
float v = (float)y / float(height);
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
u += 0.01f;
v += 0.01f;
float dix1 = -(u) / (sqrt((u*u) +(v*v) ));
float diy1 = -(v) / sqrt((u*u)+(v*v));
float dix2 = (u - 0.45f) / (sqrt((u - 0.45f)*(u - 0.45f) + (v - 0.45f)*(v - 0.45f)));
float diy2 = (v-0.45f) / sqrt((u - 0.45f)*(u - 0.45f) + (v - 0.45f)*(v - 0.45f));
float wi = 0.7;
float q = 1.7;
float posx = u+ q*amplitude*dix1*cos((wi*((dix1)) +time)* 180 / 3.141592)+ q * amplitude*dix2*cos((wi*((dix2)) + time) * 180 / 3.141592);
float posz = v+ q*amplitude*diy1*cos((wi*((diy1)) + time)* 180 / 3.141592)+ q * amplitude*diy2*cos((wi*((diy2)) + time) * 180 / 3.141592);
float posy = amplitude*sin((wi*((dix1*u) + (diy1*v)) + time)*180 / 3.141592)+ amplitude * sin((wi*((dix2*u) + (diy2*v)) + time) * 180 / 3.141592);
pos[x + width * y] = make_float3(posx,posy,posz);
}
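// Superimposes n Gerstner waves (directional or circular) on the grid and also outputs a per-vertex surface normal.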
__global__ void multiWaveGerstenerKernelWithNormals(float3 * pos, float3 * norms ,unsigned int width, unsigned int height, WaveProp * prop, int n, float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x / float(width);
float v = (float)y / float(height);
u = 2.0f*u - 1.0f;
v = 2.0f*v - 1.0f;
u += 0.01f;
v += 0.01f;
float posx = u;
float posz = v;
float posy = 0;
float normx = 0;
float normy = 0;
float normz = 0;
for (int i = 0; i < n; i++)
{
float dix = 0.0f, diy = 0.0f;
if (prop[i].isCircular == 0xFF)
{
float x = prop[i].x;
float y = prop[i].y;
x = u - x;
y = v - y;
dix = (x) / (sqrt(x*x + y * y));
diy = (y) / (sqrt(x*x + y * y));
}
else
{
dix = prop[i].dirx;
diy = prop[i].diry;
}
float amplitude = prop[i].amplitude;
float q = prop[i].q;
float wi = prop[i].w;
float phi = prop[i].phase;
posx += q * amplitude*dix*cos((wi*((dix)) + time * phi) * 180 / 3.141592);
posz += q * amplitude*diy*cos((wi*((diy)) + time * phi) * 180 / 3.141592);
posy += amplitude * sin((wi*((dix*u) + (diy*v)) + time * phi) * 180 / 3.141592);
normx += dix * wi * amplitude * sin((wi*(dix*u + diy * v) + time * phi) * 180 / 3.141592);
normz += diy * wi * amplitude * sin(((wi*(dix*u + diy * v)) + time * phi) * 180 / 3.141592);
normy += q * wi * amplitude * cos((wi*(dix*u + diy * v) + time * phi) * 180 / 3.141592);
}
normx -= normx;
normz -= normz;
normy = 1 - normy;
pos[x + width * y] = make_float3(posx, posy, posz);
norms[x + width * y] = make_float3(normx, normy, normz);
}
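// Displaces each grid vertex by the sum of n Gerstner waves, each either directional or radiating from a point source.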
__global__ void multiWaveGerstenerKernel(float3 * pos,unsigned int width, unsigned int height, WaveProp * prop,int n ,float time)
{
unsigned int x = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int y = threadIdx.y + blockDim.y * blockIdx.y;
float u = (float)x / float(width);
float v = (float)y / float(height);
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
u += 0.01f;
v += 0.01f;
float posx = u;
float posz = v;
float posy = 0;
for (int i = 0; i < n; i++)
{
float dix=0.0f, diy=0.0f;
if (prop[i].isCircular == 0xFF)
{
float x = prop[i].x;
float y = prop[i].y;
x = u - x;
y = v - y;
dix = (x) / (sqrt(x*x + y*y));
diy = (y) / (sqrt(x*x + y*y));
}
else
{
dix = prop[i].dirx;
diy = prop[i].diry;
}
float amplitude = prop[i].amplitude;
float q = prop[i].q;
float wi = prop[i].w;
float phi = prop[i].phase;
posx += q * amplitude*dix*cos((wi*((dix)) + time*phi) * 180 / 3.141592);
posz += q * amplitude*diy*cos((wi*((diy)) + time*phi) * 180 / 3.141592);
posy += amplitude * sin((wi*((dix*u) + (diy*v)) + time*phi) * 180 / 3.141592);
}
pos[x + width * y] = make_float3(posx, posy, posz);
}
void UpdateMesh(float3 *pos, unsigned int mesh_width,
unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
simple_vbo_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time);
}
void GerstnerTest(float3 * pos, unsigned int mesh_width, unsigned int mesh_height, float amplitude,float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
gerstenerKernel << < grid, block >> > (pos, mesh_width, mesh_height,amplitude, time);
}
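// Uploads the wave descriptors to the device, launches the Gerstner kernel that also writes per-vertex normals, then frees the temporary buffer.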
void GerstnerNormalTest(float3 * pos, float3 * norms,WaveProp * prop, MeshProp mesh, int n, float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh.mesh_width / block.x, mesh.mesh_height / block.y, 1);
WaveProp * dev_ptr;
checkCudaErrors(cudaMalloc(&dev_ptr, sizeof(WaveProp)*n));
checkCudaErrors(cudaMemcpy(dev_ptr, prop, sizeof(WaveProp)*n, cudaMemcpyHostToDevice));
multiWaveGerstenerKernelWithNormals << <grid, block >> > (pos, norms,mesh.mesh_width, mesh.mesh_height, dev_ptr, n, time);
getLastCudaError("Cuda Kernel Launch failed");
checkCudaErrors(cudaFree(dev_ptr));
}
void GerstnerTest(float3 * pos, WaveProp * prop, MeshProp mesh,int n ,float time)
{
dim3 block(8, 8, 1);
dim3 grid(mesh.mesh_width / block.x, mesh.mesh_height / block.y, 1);
WaveProp * dev_ptr;
checkCudaErrors(cudaMalloc(&dev_ptr,sizeof(WaveProp)*n));
checkCudaErrors(cudaMemcpy(dev_ptr, prop, sizeof(WaveProp)*n, cudaMemcpyHostToDevice));
multiWaveGerstenerKernel<< < grid, block >> > (pos, mesh.mesh_width, mesh.mesh_height, dev_ptr,n, time);
getLastCudaError("Cuda Kernel Launch failed");
checkCudaErrors(cudaFree(dev_ptr));
}
void UpdateColors(float4 * pos, unsigned int width, unsigned int height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(width / block.x, height / block.y, 1);
color_kernel << < grid, block >> > (pos, width, height, time);
}
void UpdateGrid(float3 * pos, unsigned int width, unsigned int height, float time)
{
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
grid_kernel << <grid, block >> > (pos, width, height, time);
}
|
98eda9b282bb0518fdc46f72292ec9abcbf9ed8f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* naive_cuckoo.cu
*
* Created on: 03-05-2015
* Author: Karol Dzitkowski
*/
#include "cuckoo_hash.hpp"
#include "macros.h"
#include "constants.h"
#include "hash_function.cuh"
#include "naive_cuckoo_hash.cuh"
#include <random>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <hip/hip_runtime_api.h>
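// Marks in the stencil every bucket targeted by an incoming value that is already occupied, so its current occupant can be collected for re-insertion.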
__global__ void cuckooRefillStencilKernel(int2* values, int values_size, int2* hashMap,
int* stencil, int stencil_size, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, stencil_size);
stencil[hash] = hashMap[hash].x != EMPTY_BUCKET_KEY ? 1 : 0;
}
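// Scatters each value into the bucket selected by its hash; when several values collide on a bucket only one write survives.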
__global__ void cuckooFillKernel(int2* values, int values_size, int2* hashMap,
int hashMap_size, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, hashMap_size);
hashMap[hash] = value;
}
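// Flags (result[idx] = 1) every value that did not end up in its target bucket, i.e. the losers of a collision.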
__global__ void cuckooCheckKernel(int2* values, int values_size, int2* hashMap,
int hashMap_size, int* result, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, hashMap_size);
result[idx] = hashMap[hash].x != value.x ? 1 : 0;
}
struct is_true
{
__host__ __device__
bool operator()(const int x)
{
return x == 1;
}
};
__host__ thrust::device_vector<int2>
cuckooFillHashMap(int2* values, int size, int2* hashMap, int hashMap_size, unsigned seed)
{
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
thrust::device_vector<int2> result_vector;
thrust::device_ptr<int2> hashMap_ptr(hashMap);
int* stencil;
int stencil_size = hashMap_size;
// CREATE STENCIL CONTAINING 1 WHERE SOME ELEMENT WANTS TO BE PUT
CUDA_CALL( hipMalloc((void**)&stencil, stencil_size*sizeof(int)) );
CUDA_CALL( hipMemset(stencil, 0, stencil_size*sizeof(int)) );
hipLaunchKernelGGL(( cuckooRefillStencilKernel), dim3(block_cnt), dim3(block_size), 0, 0, values, size, hashMap, stencil, stencil_size, seed);
hipDeviceSynchronize();
thrust::device_ptr<int> stencil_ptr(stencil);
// PrintStencil(stencil_ptr, hashMap_size, "First Stencil:");
// resize result_vector to fit additional data pointed by stencil
int cnt_1 = thrust::reduce(stencil_ptr, stencil_ptr + hashMap_size);
// printf("Cnt 1 = %d\n", cnt_1);
result_vector.resize(result_vector.size()+cnt_1);
// COPY ELEMENTS INDICATED BY STENCIL TO RESULT VECTOR
thrust::copy_if(hashMap_ptr, (hashMap_ptr+hashMap_size), stencil_ptr, result_vector.data(), is_true());
hipDeviceSynchronize();
CUDA_CALL( hipFree(stencil) );
// PrintDeviceVector(result_vector, "Result Vector: ");
// PUT ELEMENTS IN HASH MAP
hipLaunchKernelGGL(( cuckooFillKernel), dim3(block_cnt), dim3(block_size), 0, 0, values, size, hashMap, hashMap_size, seed);
hipDeviceSynchronize();
// CHECK IF MULTIPLE VALUES WERE NOT PUT IN SAME BUCKET AND CREATE A STENCIL OF THEM
CUDA_CALL( hipMalloc((void**)&stencil, size*sizeof(int)) );
CUDA_CALL( hipMemset(stencil, 0, size*sizeof(int)) );
hipLaunchKernelGGL(( cuckooCheckKernel), dim3(block_cnt), dim3(block_size), 0, 0, values, size, hashMap, hashMap_size, stencil, seed);
hipDeviceSynchronize();
stencil_ptr = thrust::device_pointer_cast(stencil);
// PrintStencil(stencil_ptr, size, "Second Stencil:");
// resize result_vector to fit additional data pointed by stencil
int cnt_2 = thrust::reduce(stencil_ptr, stencil_ptr + size);
// printf("Cnt 2 = %d\n", cnt_2);
result_vector.resize(result_vector.size()+cnt_2);
// COPY ELEMENTS THAT DIDNT FIT TO HASH MAP TO RESULT VECTOR
thrust::device_ptr<int2> values_ptr(values);
thrust::copy_if(values_ptr, values_ptr + size, stencil_ptr, result_vector.data()+cnt_1, is_true());
hipDeviceSynchronize();
CUDA_CALL( hipFree(stencil) );
// PrintDeviceVector(result_vector, "Result Vector: ");
result_vector.shrink_to_fit();
return result_vector;
}
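// Re-inserts colliding values, cycling through the hash seeds, until none remain or MAX_RETRIES attempts have been made.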
template<unsigned N>
bool naive_cuckooHash(int2* values, int in_size, int2* hashMap, int hashMap_size, Constants<N> constants)
{
int i = 1, k = 0;
auto collisions = cuckooFillHashMap(values, in_size, hashMap, hashMap_size, constants.values[i]);
while(collisions.size() && k++ < MAX_RETRIES)
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMap, hashMap_size, constants.values[i]);
i = (i+1)%N;
}
return collisions.size() == 0;
}
__constant__ unsigned const_seeds[MAX_HASH_FUNC_NO];
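// Looks each key up under every hash seed in turn; keys found in no bucket yield an EMPTY_BUCKET entry.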
__global__ void cuckooRetrieveKernel(
int* keys,
int size,
int2* hashMap,
int hashMap_size,
int2* out,
unsigned N)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size) return;
int key = keys[idx];
int hash = hashFunction(const_seeds[0], key, hashMap_size);
int2 entry = hashMap[hash];
for(int i=1; i<N && entry.x != key; i++)
{
hash = hashFunction(const_seeds[i], key, hashMap_size);
entry = hashMap[hash];
}
if(entry.x != key)
{
entry.x = EMPTY_BUCKET_KEY;
entry.y = EMPTY_BUCKET_KEY;
}
out[idx] = entry;
}
template<unsigned N>
int2* naive_cuckooRetrieve(int* keys, int size, int2* hashMap, int hashMap_size, Constants<N> constants)
{
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
int2* result;
CUDA_CALL( hipMalloc((void**)&result, size*sizeof(int2)) );
hipMemcpyToSymbol(const_seeds, &constants.values, N*sizeof(unsigned));
hipLaunchKernelGGL(( cuckooRetrieveKernel), dim3(block_cnt), dim3(block_size), 0, 0, keys, size, hashMap, hashMap_size, result, N);
hipDeviceSynchronize();
return result;
}
template bool naive_cuckooHash<2>(int2*, int, int2*, int, Constants<2>);
template bool naive_cuckooHash<3>(int2*, int, int2*, int, Constants<3>);
template bool naive_cuckooHash<4>(int2*, int, int2*, int, Constants<4>);
template bool naive_cuckooHash<5>(int2*, int, int2*, int, Constants<5>);
template int2* naive_cuckooRetrieve<2>(int*, int, int2*, int, Constants<2>);
template int2* naive_cuckooRetrieve<3>(int*, int, int2*, int, Constants<3>);
template int2* naive_cuckooRetrieve<4>(int*, int, int2*, int, Constants<4>);
template int2* naive_cuckooRetrieve<5>(int*, int, int2*, int, Constants<5>);
| 98eda9b282bb0518fdc46f72292ec9abcbf9ed8f.cu | /*
* naive_cuckoo.cu
*
* Created on: 03-05-2015
* Author: Karol Dzitkowski
*/
#include "cuckoo_hash.hpp"
#include "macros.h"
#include "constants.h"
#include "hash_function.cuh"
#include "naive_cuckoo_hash.cuh"
#include <random>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <cuda_runtime_api.h>
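// Marks in the stencil every bucket targeted by an incoming value that is already occupied, so its current occupant can be collected for re-insertion.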
__global__ void cuckooRefillStencilKernel(int2* values, int values_size, int2* hashMap,
int* stencil, int stencil_size, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, stencil_size);
stencil[hash] = hashMap[hash].x != EMPTY_BUCKET_KEY ? 1 : 0;
}
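// Scatters each value into the bucket selected by its hash; when several values collide on a bucket only one write survives.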
__global__ void cuckooFillKernel(int2* values, int values_size, int2* hashMap,
int hashMap_size, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, hashMap_size);
hashMap[hash] = value;
}
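// Flags (result[idx] = 1) every value that did not end up in its target bucket, i.e. the losers of a collision.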
__global__ void cuckooCheckKernel(int2* values, int values_size, int2* hashMap,
int hashMap_size, int* result, unsigned seed)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= values_size) return;
int2 value = values[idx];
int hash = hashFunction(seed, value.x, hashMap_size);
result[idx] = hashMap[hash].x != value.x ? 1 : 0;
}
struct is_true
{
__host__ __device__
bool operator()(const int x)
{
return x == 1;
}
};
__host__ thrust::device_vector<int2>
cuckooFillHashMap(int2* values, int size, int2* hashMap, int hashMap_size, unsigned seed)
{
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
thrust::device_vector<int2> result_vector;
thrust::device_ptr<int2> hashMap_ptr(hashMap);
int* stencil;
int stencil_size = hashMap_size;
// CREATE STENCIL CONTAINING 1 WHERE SOME ELEMENT WANTS TO BE PUT
CUDA_CALL( cudaMalloc((void**)&stencil, stencil_size*sizeof(int)) );
CUDA_CALL( cudaMemset(stencil, 0, stencil_size*sizeof(int)) );
cuckooRefillStencilKernel<<<block_cnt, block_size>>>(values, size, hashMap, stencil, stencil_size, seed);
cudaDeviceSynchronize();
thrust::device_ptr<int> stencil_ptr(stencil);
// PrintStencil(stencil_ptr, hashMap_size, "First Stencil:");
// resize result_vector to fit additional data pointed by stencil
int cnt_1 = thrust::reduce(stencil_ptr, stencil_ptr + hashMap_size);
// printf("Cnt 1 = %d\n", cnt_1);
result_vector.resize(result_vector.size()+cnt_1);
// COPY ELEMENTS INDICATED BY STENCIL TO RESULT VECTOR
thrust::copy_if(hashMap_ptr, (hashMap_ptr+hashMap_size), stencil_ptr, result_vector.data(), is_true());
cudaDeviceSynchronize();
CUDA_CALL( cudaFree(stencil) );
// PrintDeviceVector(result_vector, "Result Vector: ");
// PUT ELEMENTS IN HASH MAP
cuckooFillKernel<<<block_cnt, block_size>>>(values, size, hashMap, hashMap_size, seed);
cudaDeviceSynchronize();
// CHECK IF MULTIPLE VALUES WERE NOT PUT IN SAME BUCKET AND CREATE A STENCIL OF THEM
CUDA_CALL( cudaMalloc((void**)&stencil, size*sizeof(int)) );
CUDA_CALL( cudaMemset(stencil, 0, size*sizeof(int)) );
cuckooCheckKernel<<<block_cnt, block_size>>>(values, size, hashMap, hashMap_size, stencil, seed);
cudaDeviceSynchronize();
stencil_ptr = thrust::device_pointer_cast(stencil);
// PrintStencil(stencil_ptr, size, "Second Stencil:");
// resize result_vector to fit additional data pointed by stencil
int cnt_2 = thrust::reduce(stencil_ptr, stencil_ptr + size);
// printf("Cnt 2 = %d\n", cnt_2);
result_vector.resize(result_vector.size()+cnt_2);
// COPY ELEMENTS THAT DIDNT FIT TO HASH MAP TO RESULT VECTOR
thrust::device_ptr<int2> values_ptr(values);
thrust::copy_if(values_ptr, values_ptr + size, stencil_ptr, result_vector.data()+cnt_1, is_true());
cudaDeviceSynchronize();
CUDA_CALL( cudaFree(stencil) );
// PrintDeviceVector(result_vector, "Result Vector: ");
result_vector.shrink_to_fit();
return result_vector;
}
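// Re-inserts colliding values, cycling through the hash seeds, until none remain or MAX_RETRIES attempts have been made.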
template<unsigned N>
bool naive_cuckooHash(int2* values, int in_size, int2* hashMap, int hashMap_size, Constants<N> constants)
{
int i = 1, k = 0;
auto collisions = cuckooFillHashMap(values, in_size, hashMap, hashMap_size, constants.values[i]);
while(collisions.size() && k++ < MAX_RETRIES)
{
collisions = cuckooFillHashMap(collisions.data().get(), collisions.size(), hashMap, hashMap_size, constants.values[i]);
i = (i+1)%N;
}
return collisions.size() == 0;
}
__constant__ unsigned const_seeds[MAX_HASH_FUNC_NO];
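// Looks each key up under every hash seed in turn; keys found in no bucket yield an EMPTY_BUCKET entry.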
__global__ void cuckooRetrieveKernel(
int* keys,
int size,
int2* hashMap,
int hashMap_size,
int2* out,
unsigned N)
{
unsigned long int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size) return;
int key = keys[idx];
int hash = hashFunction(const_seeds[0], key, hashMap_size);
int2 entry = hashMap[hash];
for(int i=1; i<N && entry.x != key; i++)
{
hash = hashFunction(const_seeds[i], key, hashMap_size);
entry = hashMap[hash];
}
if(entry.x != key)
{
entry.x = EMPTY_BUCKET_KEY;
entry.y = EMPTY_BUCKET_KEY;
}
out[idx] = entry;
}
template<unsigned N>
int2* naive_cuckooRetrieve(int* keys, int size, int2* hashMap, int hashMap_size, Constants<N> constants)
{
int block_size = CUCKOO_HASHING_BLOCK_SIZE;
int block_cnt = (size + block_size - 1) / block_size;
int2* result;
CUDA_CALL( cudaMalloc((void**)&result, size*sizeof(int2)) );
cudaMemcpyToSymbol(const_seeds, &constants.values, N*sizeof(unsigned));
cuckooRetrieveKernel<<<block_cnt, block_size>>>(keys, size, hashMap, hashMap_size, result, N);
cudaDeviceSynchronize();
return result;
}
template bool naive_cuckooHash<2>(int2*, int, int2*, int, Constants<2>);
template bool naive_cuckooHash<3>(int2*, int, int2*, int, Constants<3>);
template bool naive_cuckooHash<4>(int2*, int, int2*, int, Constants<4>);
template bool naive_cuckooHash<5>(int2*, int, int2*, int, Constants<5>);
template int2* naive_cuckooRetrieve<2>(int*, int, int2*, int, Constants<2>);
template int2* naive_cuckooRetrieve<3>(int*, int, int2*, int, Constants<3>);
template int2* naive_cuckooRetrieve<4>(int*, int, int2*, int, Constants<4>);
template int2* naive_cuckooRetrieve<5>(int*, int, int2*, int, Constants<5>);
|
a5e648d9ad2f3df5b4f15f49788570f04c1b05e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void){
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| a5e648d9ad2f3df5b4f15f49788570f04c1b05e8.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void){
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
b1e56b5541736bc0fb774e6a230915de8f5732fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
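// Prints an nvcc -gencode flag for every detected device, failing if any device has compute capability below 3.0.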
int main(int argc, char **argv){
hipDeviceProp_t dP;
float min_cc = 3.0;
int deviceCount, rc;
rc = hipGetDeviceCount(&deviceCount);
if(rc != hipSuccess) {
hipError_t error = hipGetLastError();
printf("CUDA error: %s", hipGetErrorString(error));
return rc; /* Failure */
}
for (int i=0; i < deviceCount; i++)
{
rc = hipGetDeviceProperties(&dP, i);
if(rc != hipSuccess) {
hipError_t error = hipGetLastError();
printf("CUDA error: %s", hipGetErrorString(error));
return rc; /* Failure */
}
if((dP.major+(dP.minor/10)) < min_cc) {
printf("Min Compute Capability of %2.1f required: %d.%d found\n Not Building CUDA Code", min_cc, dP.major, dP.minor);
return 1; /* Failure */
} else {
int v = dP.major*10 + dP.minor;
if (i<deviceCount-1)
printf("-gencode arch=compute_%d,code=sm_%d;",v,v);
else
printf("-gencode arch=compute_%d,code=sm_%d",v,v);
}
}
return 0; /* Success */
}
| b1e56b5541736bc0fb774e6a230915de8f5732fd.cu | #include <stdio.h>
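// Prints an nvcc -gencode flag for every detected device, failing if any device has compute capability below 3.0.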
int main(int argc, char **argv){
cudaDeviceProp dP;
float min_cc = 3.0;
int deviceCount, rc;
rc = cudaGetDeviceCount(&deviceCount);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure */
}
for (int i=0; i < deviceCount; i++)
{
rc = cudaGetDeviceProperties(&dP, i);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure */
}
if((dP.major+(dP.minor/10)) < min_cc) {
printf("Min Compute Capability of %2.1f required: %d.%d found\n Not Building CUDA Code", min_cc, dP.major, dP.minor);
return 1; /* Failure */
} else {
int v = dP.major*10 + dP.minor;
if (i<deviceCount-1)
printf("-gencode arch=compute_%d,code=sm_%d;",v,v);
else
printf("-gencode arch=compute_%d,code=sm_%d",v,v);
}
}
return 0; /* Success */
}
|
7c88e8e62bc0c204de25596d6c3c566853abac20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file residuals_aux.cu
*
* \brief CUDA kernel to compute wave-function residuals on GPUs.
*/
#include "gpu/cuda_common.hpp"
#include "gpu/acc_runtime.hpp"
__global__ void compute_residuals_gpu_kernel
(
int const num_rows_loc__,
double const* eval__,
acc_complex_double_t const* hpsi__,
acc_complex_double_t const* opsi__,
acc_complex_double_t* res__
)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int ibnd = blockIdx.y;
if (j < num_rows_loc__) {
int k = array2D_offset(j, ibnd, num_rows_loc__);
/* res = hpsi_j - e_j * opsi_j */
res__[k] = accCsub(hpsi__[k], make_accDoubleComplex(opsi__[k].x * eval__[ibnd], opsi__[k].y * eval__[ibnd]));
}
}
//== __global__ void compute_residuals_norm_gpu_kernel
//== (
//== int num_gkvec_row,
//== int* res_idx,
//== acc_complex_double_t const* res,
//== double* res_norm,
//== int reduced,
//== int mpi_rank
//== )
//== {
//== int N = num_blocks(num_gkvec_row, blockDim.x);
//==
//== ACC_DYNAMIC_SHARED( char, sdata_ptr)
//== double* sdata = (double*)&sdata_ptr[0];
//==
//== sdata[threadIdx.x] = 0.0;
//==
//== for (int n = 0; n < N; n++)
//== {
//== int igk = n * blockDim.x + threadIdx.x;
//== if (igk < num_gkvec_row)
//== {
//== int k = array2D_offset(igk, blockIdx.x, num_gkvec_row);
//== sdata[threadIdx.x] += res[k].x * res[k].x + res[k].y * res[k].y;
//== }
//== }
//== __syncthreads();
//==
//== for (int s = 1; s < blockDim.x; s *= 2)
//== {
//== if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s];
//== __syncthreads();
//== }
//==
//== if (!reduced)
//== {
//== res_norm[res_idx[blockIdx.x]] = sdata[0];
//== }
//== else
//== {
//== if (mpi_rank == 0)
//== {
//== double x = res[array2D_offset(0, blockIdx.x, num_gkvec_row)].x;
//== res_norm[res_idx[blockIdx.x]] = 2 * sdata[0] - x * x;
//== }
//== else
//== {
//== res_norm[res_idx[blockIdx.x]] = 2 * sdata[0];
//== }
//== }
//== }
//==
//== extern "C" void residuals_aux_gpu(int num_gvec_loc__,
//== int num_res_local__,
//== int* res_idx__,
//== double* eval__,
//== acc_complex_double_t const* hpsi__,
//== acc_complex_double_t const* opsi__,
//== double const* h_diag__,
//== double const* o_diag__,
//== acc_complex_double_t* res__,
//== double* res_norm__,
//== double* p_norm__,
//== int gkvec_reduced__,
//== int mpi_rank__)
//== {
//== dim3 grid_t(64);
//== dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_res_local__);
//==
//== compute_residuals_gpu_kernel <<<grid_b, grid_t>>>
//== (
//== num_gvec_loc__,
//== eval__,
//== hpsi__,
//== opsi__,
//== res__
//== );
//==
//== grid_b = dim3(num_res_local__);
//==
//== compute_residuals_norm_gpu_kernel <<<grid_b, grid_t, grid_t.x * sizeof(double)>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== res__,
//== res_norm__,
//== gkvec_reduced__,
//== mpi_rank__
//== );
//==
//== grid_b = dim3(num_blocks(num_gvec_loc__, grid_t.x), num_res_local__);
//==
//== apply_preconditioner_gpu_kernel <<<grid_b, grid_t>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== eval__,
//== h_diag__,
//== o_diag__,
//== res__
//== );
//==
//== grid_b = dim3(num_res_local__);
//==
//== compute_residuals_norm_gpu_kernel <<<grid_b, grid_t, grid_t.x * sizeof(double)>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== res__,
//== p_norm__,
//== gkvec_reduced__,
//== mpi_rank__
//== );
//== }
extern "C" void compute_residuals_gpu(acc_complex_double_t* hpsi__,
acc_complex_double_t* opsi__,
acc_complex_double_t* res__,
int num_rows_loc__,
int num_bands__,
double* eval__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_rows_loc__, grid_t.x), num_bands__);
accLaunchKernel((compute_residuals_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_rows_loc__,
eval__,
hpsi__,
opsi__,
res__
);
}
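// Block-wise reduction of |wf|^2 over the local rows, one thread block per wave function; the reduced__ path doubles the sum and corrects the G=0 term on rank 0.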
template <typename T>
__global__ void add_square_sum_gpu_kernel
(
int num_rows_loc__,
gpu_complex_type<T> const* wf__,
int reduced__,
int mpi_rank__,
T* result__
)
{
int N = num_blocks(num_rows_loc__, blockDim.x);
ACC_DYNAMIC_SHARED( char, sdata_ptr)
T* sdata = (T*)&sdata_ptr[0];
sdata[threadIdx.x] = 0.0;
for (int n = 0; n < N; n++) {
int j = n * blockDim.x + threadIdx.x;
if (j < num_rows_loc__) {
int k = array2D_offset(j, blockIdx.x, num_rows_loc__);
sdata[threadIdx.x] += (wf__[k].x * wf__[k].x + wf__[k].y * wf__[k].y);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
if (!reduced__) {
result__[blockIdx.x] += sdata[0];
} else {
if (mpi_rank__ == 0) {
T x = wf__[array2D_offset(0, blockIdx.x, num_rows_loc__)].x;
result__[blockIdx.x] += (2 * sdata[0] - x * x);
}
else {
result__[blockIdx.x] += 2 * sdata[0];
}
}
}
}
extern "C" void add_square_sum_gpu_double(acc_complex_double_t* wf__,
int num_rows_loc__,
int nwf__,
int reduced__,
int mpi_rank__,
double* result__)
{
dim3 grid_t(64);
dim3 grid_b(nwf__);
accLaunchKernel((add_square_sum_gpu_kernel<double>), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0,
num_rows_loc__, wf__, reduced__, mpi_rank__, result__);
}
extern "C" void add_square_sum_gpu_float(acc_complex_float_t* wf__,
int num_rows_loc__,
int nwf__,
int reduced__,
int mpi_rank__,
float* result__)
{
dim3 grid_t(64);
dim3 grid_b(nwf__);
accLaunchKernel((add_square_sum_gpu_kernel<float>), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(float), 0,
num_rows_loc__, wf__, reduced__, mpi_rank__, result__);
}
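// Divides each residual element by the diagonal preconditioner p = 0.5 * (1 + p + sqrt(1 + (p - 1)^2)), where p = h_diag - eval * o_diag.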
__global__ void apply_preconditioner_gpu_kernel(int const num_rows_loc__,
double const* eval__,
double const* h_diag__,
double const* o_diag__,
acc_complex_double_t* res__)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int ibnd = blockIdx.y;
if (j < num_rows_loc__) {
double p = (h_diag__[j] - eval__[ibnd] * o_diag__[j]);
p = 0.5 * (1 + p + sqrt(1.0 + (p - 1) * (p - 1)));
int k = array2D_offset(j, ibnd, num_rows_loc__);
res__[k] = make_accDoubleComplex(res__[k].x / p, res__[k].y / p);
}
}
extern "C" void apply_preconditioner_gpu(acc_complex_double_t* res__,
int num_rows_loc__,
int num_bands__,
double* eval__,
const double* h_diag__,
const double* o_diag__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_rows_loc__, grid_t.x), num_bands__);
accLaunchKernel((apply_preconditioner_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_rows_loc__, eval__, h_diag__, o_diag__, res__);
}
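// Forces the G=0 (first-row) component of each column to be purely real.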
__global__ void make_real_g0_gpu_kernel(acc_complex_double_t* res__,
int ld__)
{
acc_complex_double_t z = res__[array2D_offset(0, blockIdx.x, ld__)];
if (threadIdx.x == 0) {
res__[array2D_offset(0, blockIdx.x, ld__)] = make_accDoubleComplex(z.x, 0);
}
}
extern "C" void make_real_g0_gpu(acc_complex_double_t* res__,
int ld__,
int n__)
{
dim3 grid_t(32);
dim3 grid_b(n__);
accLaunchKernel((make_real_g0_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, res__, ld__);
}
| 7c88e8e62bc0c204de25596d6c3c566853abac20.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file residuals_aux.cu
*
* \brief CUDA kernel to compute wave-function residuals on GPUs.
*/
#include "gpu/cuda_common.hpp"
#include "gpu/acc_runtime.hpp"
__global__ void compute_residuals_gpu_kernel
(
int const num_rows_loc__,
double const* eval__,
acc_complex_double_t const* hpsi__,
acc_complex_double_t const* opsi__,
acc_complex_double_t* res__
)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int ibnd = blockIdx.y;
if (j < num_rows_loc__) {
int k = array2D_offset(j, ibnd, num_rows_loc__);
/* res = hpsi_j - e_j * opsi_j */
res__[k] = accCsub(hpsi__[k], make_accDoubleComplex(opsi__[k].x * eval__[ibnd], opsi__[k].y * eval__[ibnd]));
}
}
//== __global__ void compute_residuals_norm_gpu_kernel
//== (
//== int num_gkvec_row,
//== int* res_idx,
//== acc_complex_double_t const* res,
//== double* res_norm,
//== int reduced,
//== int mpi_rank
//== )
//== {
//== int N = num_blocks(num_gkvec_row, blockDim.x);
//==
//== ACC_DYNAMIC_SHARED( char, sdata_ptr)
//== double* sdata = (double*)&sdata_ptr[0];
//==
//== sdata[threadIdx.x] = 0.0;
//==
//== for (int n = 0; n < N; n++)
//== {
//== int igk = n * blockDim.x + threadIdx.x;
//== if (igk < num_gkvec_row)
//== {
//== int k = array2D_offset(igk, blockIdx.x, num_gkvec_row);
//== sdata[threadIdx.x] += res[k].x * res[k].x + res[k].y * res[k].y;
//== }
//== }
//== __syncthreads();
//==
//== for (int s = 1; s < blockDim.x; s *= 2)
//== {
//== if (threadIdx.x % (2 * s) == 0) sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s];
//== __syncthreads();
//== }
//==
//== if (!reduced)
//== {
//== res_norm[res_idx[blockIdx.x]] = sdata[0];
//== }
//== else
//== {
//== if (mpi_rank == 0)
//== {
//== double x = res[array2D_offset(0, blockIdx.x, num_gkvec_row)].x;
//== res_norm[res_idx[blockIdx.x]] = 2 * sdata[0] - x * x;
//== }
//== else
//== {
//== res_norm[res_idx[blockIdx.x]] = 2 * sdata[0];
//== }
//== }
//== }
//==
//== extern "C" void residuals_aux_gpu(int num_gvec_loc__,
//== int num_res_local__,
//== int* res_idx__,
//== double* eval__,
//== acc_complex_double_t const* hpsi__,
//== acc_complex_double_t const* opsi__,
//== double const* h_diag__,
//== double const* o_diag__,
//== acc_complex_double_t* res__,
//== double* res_norm__,
//== double* p_norm__,
//== int gkvec_reduced__,
//== int mpi_rank__)
//== {
//== dim3 grid_t(64);
//== dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_res_local__);
//==
//== compute_residuals_gpu_kernel <<<grid_b, grid_t>>>
//== (
//== num_gvec_loc__,
//== eval__,
//== hpsi__,
//== opsi__,
//== res__
//== );
//==
//== grid_b = dim3(num_res_local__);
//==
//== compute_residuals_norm_gpu_kernel <<<grid_b, grid_t, grid_t.x * sizeof(double)>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== res__,
//== res_norm__,
//== gkvec_reduced__,
//== mpi_rank__
//== );
//==
//== grid_b = dim3(num_blocks(num_gvec_loc__, grid_t.x), num_res_local__);
//==
//== apply_preconditioner_gpu_kernel <<<grid_b, grid_t>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== eval__,
//== h_diag__,
//== o_diag__,
//== res__
//== );
//==
//== grid_b = dim3(num_res_local__);
//==
//== compute_residuals_norm_gpu_kernel <<<grid_b, grid_t, grid_t.x * sizeof(double)>>>
//== (
//== num_gvec_loc__,
//== res_idx__,
//== res__,
//== p_norm__,
//== gkvec_reduced__,
//== mpi_rank__
//== );
//== }
extern "C" void compute_residuals_gpu(acc_complex_double_t* hpsi__,
acc_complex_double_t* opsi__,
acc_complex_double_t* res__,
int num_rows_loc__,
int num_bands__,
double* eval__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_rows_loc__, grid_t.x), num_bands__);
accLaunchKernel((compute_residuals_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_rows_loc__,
eval__,
hpsi__,
opsi__,
res__
);
}
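// Block-wise reduction of |wf|^2 over the local rows, one thread block per wave function; the reduced__ path doubles the sum and corrects the G=0 term on rank 0.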
template <typename T>
__global__ void add_square_sum_gpu_kernel
(
int num_rows_loc__,
gpu_complex_type<T> const* wf__,
int reduced__,
int mpi_rank__,
T* result__
)
{
int N = num_blocks(num_rows_loc__, blockDim.x);
ACC_DYNAMIC_SHARED( char, sdata_ptr)
T* sdata = (T*)&sdata_ptr[0];
sdata[threadIdx.x] = 0.0;
for (int n = 0; n < N; n++) {
int j = n * blockDim.x + threadIdx.x;
if (j < num_rows_loc__) {
int k = array2D_offset(j, blockIdx.x, num_rows_loc__);
sdata[threadIdx.x] += (wf__[k].x * wf__[k].x + wf__[k].y * wf__[k].y);
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if (threadIdx.x % (2 * s) == 0) {
sdata[threadIdx.x] = sdata[threadIdx.x] + sdata[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
if (!reduced__) {
result__[blockIdx.x] += sdata[0];
} else {
if (mpi_rank__ == 0) {
T x = wf__[array2D_offset(0, blockIdx.x, num_rows_loc__)].x;
result__[blockIdx.x] += (2 * sdata[0] - x * x);
}
else {
result__[blockIdx.x] += 2 * sdata[0];
}
}
}
}
extern "C" void add_square_sum_gpu_double(acc_complex_double_t* wf__,
int num_rows_loc__,
int nwf__,
int reduced__,
int mpi_rank__,
double* result__)
{
dim3 grid_t(64);
dim3 grid_b(nwf__);
accLaunchKernel((add_square_sum_gpu_kernel<double>), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(double), 0,
num_rows_loc__, wf__, reduced__, mpi_rank__, result__);
}
extern "C" void add_square_sum_gpu_float(acc_complex_float_t* wf__,
int num_rows_loc__,
int nwf__,
int reduced__,
int mpi_rank__,
float* result__)
{
dim3 grid_t(64);
dim3 grid_b(nwf__);
accLaunchKernel((add_square_sum_gpu_kernel<float>), dim3(grid_b), dim3(grid_t), grid_t.x * sizeof(float), 0,
num_rows_loc__, wf__, reduced__, mpi_rank__, result__);
}
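// Divides each residual element by the diagonal preconditioner p = 0.5 * (1 + p + sqrt(1 + (p - 1)^2)), where p = h_diag - eval * o_diag.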
__global__ void apply_preconditioner_gpu_kernel(int const num_rows_loc__,
double const* eval__,
double const* h_diag__,
double const* o_diag__,
acc_complex_double_t* res__)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int ibnd = blockIdx.y;
if (j < num_rows_loc__) {
double p = (h_diag__[j] - eval__[ibnd] * o_diag__[j]);
p = 0.5 * (1 + p + sqrt(1.0 + (p - 1) * (p - 1)));
int k = array2D_offset(j, ibnd, num_rows_loc__);
res__[k] = make_accDoubleComplex(res__[k].x / p, res__[k].y / p);
}
}
extern "C" void apply_preconditioner_gpu(acc_complex_double_t* res__,
int num_rows_loc__,
int num_bands__,
double* eval__,
const double* h_diag__,
const double* o_diag__)
{
dim3 grid_t(64);
dim3 grid_b(num_blocks(num_rows_loc__, grid_t.x), num_bands__);
accLaunchKernel((apply_preconditioner_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, num_rows_loc__, eval__, h_diag__, o_diag__, res__);
}
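// Forces the G=0 (first-row) component of each column to be purely real.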
__global__ void make_real_g0_gpu_kernel(acc_complex_double_t* res__,
int ld__)
{
acc_complex_double_t z = res__[array2D_offset(0, blockIdx.x, ld__)];
if (threadIdx.x == 0) {
res__[array2D_offset(0, blockIdx.x, ld__)] = make_accDoubleComplex(z.x, 0);
}
}
extern "C" void make_real_g0_gpu(acc_complex_double_t* res__,
int ld__,
int n__)
{
dim3 grid_t(32);
dim3 grid_b(n__);
accLaunchKernel((make_real_g0_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0, res__, ld__);
}
|
9d5d11c20d9d5ee321f756c457e246f22d22120f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main()
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof( int );
/* allocate space for device copies of a, b, c */
CUDA_CALL( hipMalloc( (void **) &d_a, size ) );
/* enter code here to malloc d_b and d_c */
CUDA_CALL( hipMalloc( (void **) &d_b, size) );
CUDA_CALL( hipMalloc( (void **) &d_c, size) );
/* setup initial values */
a = 2;
b = 7;
c = -99;
/* copy inputs to device */
CUDA_CALL( hipMemcpy( d_a, &a, size, hipMemcpyHostToDevice ) );
/* enter code here to copy d_b to device */
CUDA_CALL( hipMemcpy( d_b, &b, size, hipMemcpyHostToDevice ) );
CUDA_CALL( hipMemcpy( d_c, &c, size, hipMemcpyHostToDevice ) );
/* enter code here to launch the kernel on the GPU */
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
CUDA_CHECK()
CUDA_CALL( hipDeviceSynchronize() );
/* copy result back to host */
CUDA_CALL( hipMemcpy( &c, d_c, size, hipMemcpyDeviceToHost ) );
printf("value of c after kernel is %d\n",c);
/* clean up */
CUDA_CALL( hipFree( d_a ) );
/* enter code here to hipFree the d_b and d_c pointers */
CUDA_CALL( hipFree( d_b ) );
CUDA_CALL( hipFree( d_c ) );
/* calling reset to check errors */
CUDA_CALL( hipDeviceReset() );
return 0;
} /* end main */
| 9d5d11c20d9d5ee321f756c457e246f22d22120f.cu | /*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
__global__ void add(int *a, int *b, int *c)
{
*c = *a + *b;
}
int main()
{
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof( int );
/* allocate space for device copies of a, b, c */
CUDA_CALL( cudaMalloc( (void **) &d_a, size ) );
/* enter code here to malloc d_b and d_c */
CUDA_CALL( cudaMalloc( (void **) &d_b, size) );
CUDA_CALL( cudaMalloc( (void **) &d_c, size) );
/* setup initial values */
a = 2;
b = 7;
c = -99;
/* copy inputs to device */
CUDA_CALL( cudaMemcpy( d_a, &a, size, cudaMemcpyHostToDevice ) );
/* enter code here to copy d_b to device */
CUDA_CALL( cudaMemcpy( d_b, &b, size, cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( d_c, &c, size, cudaMemcpyHostToDevice ) );
/* enter code here to launch the kernel on the GPU */
add<<<1,1>>>(d_a, d_b, d_c);
CUDA_CHECK()
CUDA_CALL( cudaDeviceSynchronize() );
/* copy result back to host */
CUDA_CALL( cudaMemcpy( &c, d_c, size, cudaMemcpyDeviceToHost ) );
printf("value of c after kernel is %d\n",c);
/* clean up */
CUDA_CALL( cudaFree( d_a ) );
/* enter code here to cudaFree the d_b and d_c pointers */
CUDA_CALL( cudaFree( d_b ) );
CUDA_CALL( cudaFree( d_c ) );
/* calling reset to check errors */
CUDA_CALL( cudaDeviceReset() );
return 0;
} /* end main */
|
d26afea697572b114d0801f1d56ccd05f91ef1d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void convertVToRGBKernel(const uint16_t *pV210, uint8_t *tt1, int nSrcWidth, int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
tt1[j + k + 6] = lookupTable[tt[0]];
tt1[j + k + 7] = lookupTable[tt[1]];
tt1[j + k + 8] = lookupTable[tt[2]];
tt1[j + k + 9] = lookupTable[tt[3]];
tt1[j + k + 10] = lookupTable[tt[4]];
tt1[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
tt1[j + k + 12] = lookupTable[tt[0]];
tt1[j + k + 13] = lookupTable[tt[1]];
tt1[j + k + 14] = lookupTable[tt[2]];
tt1[j + k + 15] = lookupTable[tt[3]];
tt1[j + k + 16] = lookupTable[tt[4]];
tt1[j + k + 17] = lookupTable[tt[5]];
}
} | d26afea697572b114d0801f1d56ccd05f91ef1d2.cu | #include "includes.h"
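// Unpacks V210 10-bit 4:2:2 video (six pixels per four 32-bit words) and converts it to interleaved RGB bytes using fixed-point YUV-to-RGB arithmetic and a lookup table.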
__global__ static void convertVToRGBKernel(const uint16_t *pV210, uint8_t *tt1, int nSrcWidth, int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
tt1[j + k + 6] = lookupTable[tt[0]];
tt1[j + k + 7] = lookupTable[tt[1]];
tt1[j + k + 8] = lookupTable[tt[2]];
tt1[j + k + 9] = lookupTable[tt[3]];
tt1[j + k + 10] = lookupTable[tt[4]];
tt1[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
tt1[j + k + 12] = lookupTable[tt[0]];
tt1[j + k + 13] = lookupTable[tt[1]];
tt1[j + k + 14] = lookupTable[tt[2]];
tt1[j + k + 15] = lookupTable[tt[3]];
tt1[j + k + 16] = lookupTable[tt[4]];
tt1[j + k + 17] = lookupTable[tt[5]];
}
} |
8bbf26e6cbdab12bb578f891431eeddd3e1a0821.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| 8bbf26e6cbdab12bb578f891431eeddd3e1a0821.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
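// e.g. with BLOCK_SIZE == 32 and wA == 320, each block walks 10 tiles,
// accumulating one 32x32 partial product per iteration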
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
6513a1e1840bf400f5d1f67e68ab07727aa3d626.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sum.hh"
#include "../runtime/node.hh"
namespace gpu
{
namespace
{
constexpr std::size_t BLOCK_SIZE = 512;
__global__
void mse(const dbl_t* a, const dbl_t* b, dbl_t* out,
std::size_t len)
{
__shared__ float partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto i = threadIdx.x;
std::size_t step = BLOCK_SIZE;
float init = 0;
for (std::size_t j = i; j < len; j += step)
init += (a[j] - b[j]) * (a[j] - b[j]);
partial[i] = init;
__syncthreads();
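// Shared-memory tree reduction: each pass halves the number of active threads
// until 64 partial sums remain; the unrolled warp-synchronous code below
// collapses those into partial[0].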
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (i < s)
partial[i] += partial[i + s];
__syncthreads();
}
// volatile keeps the compiler from holding partial[] in registers during this
// warp-synchronous tail, so no further __syncthreads() is needed here
// (on Volta+ independent thread scheduling, __syncwarp() is the safer idiom)
volatile float* vpartial = partial;
if (i < 32)
{
vpartial[i] += vpartial[i + 32];
vpartial[i] += vpartial[i + 16];
vpartial[i] += vpartial[i + 8];
vpartial[i] += vpartial[i + 4];
vpartial[i] += vpartial[i + 2];
vpartial[i] += vpartial[i + 1];
}
if (i == 0)
*out = partial[0] / len;
}
__global__
void mse_grad(const dbl_t* a, const dbl_t* b, dbl_t* out, std::size_t len)
{
std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
std::size_t stride = blockDim.x * gridDim.x;
dbl_t coeff = dbl_t(2) / len;
for (std::size_t i = index; i < len; i += stride)
out[i] = coeff * (a[i] - b[i]);
}
__global__
void mat_sum_rows(const dbl_t* x, dbl_t* y,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto row = blockIdx.x;
auto col = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t i = col; i < cols; i += step)
init += x[row * cols + i];
partial[col] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (col < s)
partial[col] += partial[col + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (col < 32)
{
vpartial[col] += vpartial[col + 32];
vpartial[col] += vpartial[col + 16];
vpartial[col] += vpartial[col + 8];
vpartial[col] += vpartial[col + 4];
vpartial[col] += vpartial[col + 2];
vpartial[col] += vpartial[col + 1];
}
if (col == 0)
y[row] = partial[0];
}
__global__
void mat_sum_cols(const dbl_t* x, dbl_t* y,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto col = blockIdx.x;
auto row = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t i = row; i < rows; i += step)
init += x[i * cols + col];
partial[row] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (row < s)
partial[row] += partial[row + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (row < 32)
{
vpartial[row] += vpartial[row + 32];
vpartial[row] += vpartial[row + 16];
vpartial[row] += vpartial[row + 8];
vpartial[row] += vpartial[row + 4];
vpartial[row] += vpartial[row + 2];
vpartial[row] += vpartial[row + 1];
}
if (row == 0)
y[col] = partial[0];
}
__device__
std::size_t argmax(const dbl_t* begin, const dbl_t* end)
{
const dbl_t* res = begin;
for (const dbl_t* it = begin; it != end; ++it)
if (*it > *res)
res = it;
return res - begin;
}
__global__
void argmax_acc(const dbl_t* a, const dbl_t* b, dbl_t* out,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto i = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t j = i; j < rows; j += step)
init += argmax(a + j * cols, a + (j + 1) * cols)
== argmax(b + j * cols, b + (j + 1) * cols);
partial[i] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (i < s)
partial[i] += partial[i + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (i < 32)
{
vpartial[i] += vpartial[i + 32];
vpartial[i] += vpartial[i + 16];
vpartial[i] += vpartial[i + 8];
vpartial[i] += vpartial[i + 4];
vpartial[i] += vpartial[i + 2];
vpartial[i] += vpartial[i + 1];
}
if (i == 0)
*out = partial[0];
}
__global__
void vect_add(const dbl_t* a, const dbl_t* b, dbl_t* y, std::size_t len)
{
std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
std::size_t stride = blockDim.x * gridDim.x;
for (std::size_t i = index; i < len; i += stride)
y[i] = a[i] + b[i];
}
}
void kernel_mse(rt::Node* node)
{
std::size_t len = node->len1 * node->len2;
hipLaunchKernelGGL(( mse), dim3(1), dim3(BLOCK_SIZE), 0, 0, node->in1, node->in2, node->out1, len);
}
void kernel_mse_grad(rt::Node* node)
{
std::size_t len = node->len1;
std::size_t nb_blocks = (len + BLOCK_SIZE - 1)/ BLOCK_SIZE;
hipLaunchKernelGGL(( mse_grad), dim3(nb_blocks), dim3(BLOCK_SIZE), 0, 0, node->in2, node->in1, node->out1, node->len1);
}
void kernel_mat_sum_rows(rt::Node* node)
{
std::size_t rows = node->len1;
std::size_t cols = node->len2;
hipLaunchKernelGGL(( mat_sum_rows), dim3(rows), dim3(BLOCK_SIZE), 0, 0, node->in1, node->out1, rows, cols);
}
void kernel_mat_sum_cols(rt::Node* node)
{
std::size_t rows = node->len1;
std::size_t cols = node->len2;
hipLaunchKernelGGL(( mat_sum_cols), dim3(cols), dim3(BLOCK_SIZE), 0, 0, node->in1, node->out1, rows, cols);
}
void kernel_argmax_acc(rt::Node* node)
{
hipLaunchKernelGGL(( argmax_acc), dim3(1), dim3(BLOCK_SIZE), 0, 0, node->in1, node->in2, node->out1,
node->len1, node->len2);
}
void kernel_add(rt::Node* node)
{
std::size_t len = node->len1;
std::size_t block_size = 256;
std::size_t nb_blocks = (len + block_size - 1)/ block_size;
hipLaunchKernelGGL(( vect_add), dim3(nb_blocks), dim3(block_size), 0, 0, node->in1, node->in2, node->out1, len);
}
}
| 6513a1e1840bf400f5d1f67e68ab07727aa3d626.cu | #include "sum.hh"
#include "../runtime/node.hh"
namespace gpu
{
namespace
{
constexpr std::size_t BLOCK_SIZE = 512;
__global__
void mse(const dbl_t* a, const dbl_t* b, dbl_t* out,
std::size_t len)
{
__shared__ float partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto i = threadIdx.x;
std::size_t step = BLOCK_SIZE;
float init = 0;
for (std::size_t j = i; j < len; j += step)
init += (a[j] - b[j]) * (a[j] - b[j]);
partial[i] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (i < s)
partial[i] += partial[i + s];
__syncthreads();
}
// volatile keeps the compiler from holding partial[] in registers during this
// warp-synchronous tail, so no further __syncthreads() is needed here
// (on Volta+ independent thread scheduling, __syncwarp() is the safer idiom)
volatile float* vpartial = partial;
if (i < 32)
{
vpartial[i] += vpartial[i + 32];
vpartial[i] += vpartial[i + 16];
vpartial[i] += vpartial[i + 8];
vpartial[i] += vpartial[i + 4];
vpartial[i] += vpartial[i + 2];
vpartial[i] += vpartial[i + 1];
}
if (i == 0)
*out = partial[0] / len;
}
__global__
void mse_grad(const dbl_t* a, const dbl_t* b, dbl_t* out, std::size_t len)
{
std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
std::size_t stride = blockDim.x * gridDim.x;
dbl_t coeff = dbl_t(2) / len;
for (std::size_t i = index; i < len; i += stride)
out[i] = coeff * (a[i] - b[i]);
}
__global__
void mat_sum_rows(const dbl_t* x, dbl_t* y,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto row = blockIdx.x;
auto col = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t i = col; i < cols; i += step)
init += x[row * cols + i];
partial[col] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (col < s)
partial[col] += partial[col + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (col < 32)
{
vpartial[col] += vpartial[col + 32];
vpartial[col] += vpartial[col + 16];
vpartial[col] += vpartial[col + 8];
vpartial[col] += vpartial[col + 4];
vpartial[col] += vpartial[col + 2];
vpartial[col] += vpartial[col + 1];
}
if (col == 0)
y[row] = partial[0];
}
__global__
void mat_sum_cols(const dbl_t* x, dbl_t* y,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto col = blockIdx.x;
auto row = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t i = row; i < rows; i += step)
init += x[i * cols + col];
partial[row] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (row < s)
partial[row] += partial[row + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (row < 32)
{
vpartial[row] += vpartial[row + 32];
vpartial[row] += vpartial[row + 16];
vpartial[row] += vpartial[row + 8];
vpartial[row] += vpartial[row + 4];
vpartial[row] += vpartial[row + 2];
vpartial[row] += vpartial[row + 1];
}
if (row == 0)
y[col] = partial[0];
}
__device__
std::size_t argmax(const dbl_t* begin, const dbl_t* end)
{
const dbl_t* res = begin;
for (const dbl_t* it = begin; it != end; ++it)
if (*it > *res)
res = it;
return res - begin;
}
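// argmax_acc: for each row, compare the argmax of a with the argmax of b and
// count the matches; the raw match count is written to *out (divide by the
// number of rows on the host to get an accuracy).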
__global__
void argmax_acc(const dbl_t* a, const dbl_t* b, dbl_t* out,
std::size_t rows, std::size_t cols)
{
__shared__ dbl_t partial[2 * BLOCK_SIZE];
//load all elements of the array in shared memory
auto i = threadIdx.x;
std::size_t step = BLOCK_SIZE;
dbl_t init = 0;
for (std::size_t j = i; j < rows; j += step)
init += argmax(a + j * cols, a + (j + 1) * cols)
== argmax(b + j * cols, b + (j + 1) * cols);
partial[i] = init;
__syncthreads();
for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1)
{
if (i < s)
partial[i] += partial[i + s];
__syncthreads();
}
// volatile: avoid register caching of partial[] in the warp-synchronous tail (see note in mse above)
volatile dbl_t* vpartial = partial;
if (i < 32)
{
vpartial[i] += vpartial[i + 32];
vpartial[i] += vpartial[i + 16];
vpartial[i] += vpartial[i + 8];
vpartial[i] += vpartial[i + 4];
vpartial[i] += vpartial[i + 2];
vpartial[i] += vpartial[i + 1];
}
if (i == 0)
*out = partial[0];
}
__global__
void vect_add(const dbl_t* a, const dbl_t* b, dbl_t* y, std::size_t len)
{
std::size_t index = blockIdx.x * blockDim.x + threadIdx.x;
std::size_t stride = blockDim.x * gridDim.x;
for (std::size_t i = index; i < len; i += stride)
y[i] = a[i] + b[i];
}
}
void kernel_mse(rt::Node* node)
{
std::size_t len = node->len1 * node->len2;
mse<<<1, BLOCK_SIZE>>>(node->in1, node->in2, node->out1, len);
}
void kernel_mse_grad(rt::Node* node)
{
std::size_t len = node->len1;
std::size_t nb_blocks = (len + BLOCK_SIZE - 1)/ BLOCK_SIZE;
mse_grad<<<nb_blocks, BLOCK_SIZE>>>(node->in2, node->in1, node->out1, node->len1);
}
void kernel_mat_sum_rows(rt::Node* node)
{
std::size_t rows = node->len1;
std::size_t cols = node->len2;
mat_sum_rows<<<rows, BLOCK_SIZE>>>(node->in1, node->out1, rows, cols);
}
void kernel_mat_sum_cols(rt::Node* node)
{
std::size_t rows = node->len1;
std::size_t cols = node->len2;
mat_sum_cols<<<cols, BLOCK_SIZE>>>(node->in1, node->out1, rows, cols);
}
void kernel_argmax_acc(rt::Node* node)
{
argmax_acc<<<1, BLOCK_SIZE>>>(node->in1, node->in2, node->out1,
node->len1, node->len2);
}
void kernel_add(rt::Node* node)
{
std::size_t len = node->len1;
std::size_t block_size = 256;
std::size_t nb_blocks = (len + block_size - 1)/ block_size;
vect_add<<<nb_blocks, block_size>>>(node->in1, node->in2, node->out1, len);
}
}
|
a30bb0bd2450f27492a9744636d0fec013d3ef79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_util.h"
#include "centroid.h"
/** First we want to calculate a histogram of columns for now
* After that, use the centroid algorithm (center of mass)
* Weighted sum / sum
*/
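// i.e. the usual center of mass: with h(j) the column-sum profile of the image,
// x = sum_j j*h(j) / sum_j h(j), and y follows the same recipe from the
// row-sum profile.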
__global__ void GPU_hist_colsum(float *image,
int n_rows, int n_cols, int offset, int shmemsize, int weighted) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i << offset) >= n_rows || j >= n_cols) {
return;
}
// stage this block's elements in shared memory (fast on-chip storage)
extern __shared__ float interim[];
int idx = i % shmemsize;
interim[idx] = image[IJ2C(i << offset, j, n_rows)] * (idx * weighted + !weighted); // skip elements for coalescing
__syncthreads();
// pairwise tree summation over the shared tile (for a power-of-2 length)
int length = MIN(shmemsize, n_rows - (i - idx));
int idt = idx + 1;
for (int partition = 2; (idt < length) & (idx % partition == 0); partition <<= 1) {
interim[idx] += interim[idt];
__syncthreads();
idt = idx + partition;
}
if (idx == 0) {
image[IJ2C(i, j, n_rows)] = interim[idx];
}
}
__global__ void GPU_hist_rowsum(float *image,
int n_rows, int n_cols, int offset, int shmemsize, int weighted) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_rows || (j << offset) >= n_cols) {
return;
}
// stage this block's elements in shared memory (fast on-chip storage)
extern __shared__ float interim[];
int idx = j % shmemsize;
interim[idx] = image[IJ2C(i, j << offset, n_rows)] * (idx * weighted + !weighted); // skip elements for coalescing
__syncthreads();
// pairwise tree summation over the shared tile (for a power-of-2 length)
int length = MIN(shmemsize, n_cols - (j - idx));
int idt = idx + 1;
for (int partition = 2; (idt < length) & (idx % partition == 0); partition <<= 1) {
interim[idx] += interim[idt];
__syncthreads();
idt = idx + partition;
}
if (idx == 0) {
image[IJ2C(i, j, n_rows)] = interim[idx];
}
}
__global__ void GPU_memcpy_strided(float *G, float *F, int length, int stride, int offset) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= length) {
return;
}
G[i] = F[offset + i * stride];
}
void gpu_hist(const gcube &F, gcube &H, bool rowhist, bool weighted) {
// create a temporary memory space for the image
gcube G;
size_t glen;
size_t blen;
G.copy(F);
if (!rowhist) {
glen = G.n_rows;
for (int i = 0; (1 << i) <= G.n_rows; i += 8) {
blen = MIN(256, glen);
glen = ((glen - 1) >> 8) + 1;
dim3 blockSize(1, blen, 1);
dim3 gridSize(G.n_cols, glen, 1);
hipLaunchKernelGGL(( GPU_hist_colsum), dim3(gridSize), dim3(blockSize), sizeof(float) << 8, 0, 0, G.d_pixels, G.n_rows, G.n_cols, i, blen, weighted ? 1 : 0);
checkCudaErrors(hipGetLastError());
}
H.create(G.n_cols);
dim3 bs(256, 1, 1);
dim3 gs((G.n_cols-1)/256+1, 1, 1);
hipLaunchKernelGGL(( GPU_memcpy_strided), dim3(gs), dim3(bs), 0, 0, H.d_pixels, G.d_pixels, G.n_cols, G.n_rows, 0);
checkCudaErrors(hipGetLastError());
} else {
glen = G.n_cols;
for (int i = 0; (1 << i) <= G.n_cols; i += 8) {
blen = MIN(256, glen);
glen = ((glen - 1) >> 8) + 1;
dim3 blockSize(blen, 1, 1);
dim3 gridSize(glen, G.n_rows, 1);
hipLaunchKernelGGL(( GPU_hist_rowsum), dim3(gridSize), dim3(blockSize), sizeof(float) << 8, 0, 0, G.d_pixels, G.n_rows, G.n_cols, i, blen, weighted ? 1 : 0);
checkCudaErrors(hipGetLastError());
}
H.create(G.n_rows);
checkCudaErrors(hipMemcpy(H.d_pixels, G.d_pixels, sizeof(float) * G.n_rows, hipMemcpyDeviceToDevice));
}
}
void gpu_centroid(const gcube &F, double &x, double &y) { // make more efficient later on by getting rid of the ENTIRE copies of F
gcube V, wV, H, wH;
gpu_hist(F, V, true, false);
gpu_hist(F, wV, true, false);
gpu_hist(F, H, false, false);
gpu_hist(F, wH, false, false);
// calculate the centroids
gcube t, w;
gpu_hist(V, t, false, false);
gpu_hist(wV, w, false, true);
y = w.arma_cube()(0, 0, 0) / t.arma_cube()(0, 0, 0);
gpu_hist(H, t, false, false);
gpu_hist(wH, w, false, true);
x = w.arma_cube()(0, 0, 0) / t.arma_cube()(0, 0, 0);
}
| a30bb0bd2450f27492a9744636d0fec013d3ef79.cu | #include "gpu_util.h"
#include "centroid.h"
/** First we want to calculate a histogram of columns for now
* After that, use the centroid algorithm (center of mass)
* Weighted sum / sum
*/
__global__ void GPU_hist_colsum(float *image,
int n_rows, int n_cols, int offset, int shmemsize, int weighted) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i << offset) >= n_rows || j >= n_cols) {
return;
}
// stage this block's elements in shared memory (fast on-chip storage)
extern __shared__ float interim[];
int idx = i % shmemsize;
interim[idx] = image[IJ2C(i << offset, j, n_rows)] * (idx * weighted + !weighted); // skip elements for coalescing
__syncthreads();
// pairwise tree summation over the shared tile (for a power-of-2 length)
int length = MIN(shmemsize, n_rows - (i - idx));
int idt = idx + 1;
for (int partition = 2; (idt < length) & (idx % partition == 0); partition <<= 1) {
interim[idx] += interim[idt];
__syncthreads();
idt = idx + partition;
}
if (idx == 0) {
image[IJ2C(i, j, n_rows)] = interim[idx];
}
}
__global__ void GPU_hist_rowsum(float *image,
int n_rows, int n_cols, int offset, int shmemsize, int weighted) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_rows || (j << offset) >= n_cols) {
return;
}
// stage this block's elements in shared memory (fast on-chip storage)
extern __shared__ float interim[];
int idx = j % shmemsize;
interim[idx] = image[IJ2C(i, j << offset, n_rows)] * (idx * weighted + !weighted); // skip elements for coalescing
__syncthreads();
// pairwise tree summation over the shared tile (for a power-of-2 length)
int length = MIN(shmemsize, n_cols - (j - idx));
int idt = idx + 1;
for (int partition = 2; (idt < length) & (idx % partition == 0); partition <<= 1) {
interim[idx] += interim[idt];
__syncthreads();
idt = idx + partition;
}
if (idx == 0) {
image[IJ2C(i, j, n_rows)] = interim[idx];
}
}
__global__ void GPU_memcpy_strided(float *G, float *F, int length, int stride, int offset) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= length) {
return;
}
G[i] = F[offset + i * stride];
}
void gpu_hist(const gcube &F, gcube &H, bool rowhist, bool weighted) {
// create a temporary memory space for the image
gcube G;
size_t glen;
size_t blen;
G.copy(F);
if (!rowhist) {
glen = G.n_rows;
for (int i = 0; (1 << i) <= G.n_rows; i += 8) {
blen = MIN(256, glen);
glen = ((glen - 1) >> 8) + 1;
dim3 blockSize(1, blen, 1);
dim3 gridSize(G.n_cols, glen, 1);
GPU_hist_colsum<<<gridSize, blockSize, sizeof(float) << 8>>>(G.d_pixels, G.n_rows, G.n_cols, i, blen, weighted ? 1 : 0);
checkCudaErrors(cudaGetLastError());
}
H.create(G.n_cols);
dim3 bs(256, 1, 1);
dim3 gs((G.n_cols-1)/256+1, 1, 1);
GPU_memcpy_strided<<<gs, bs>>>(H.d_pixels, G.d_pixels, G.n_cols, G.n_rows, 0);
checkCudaErrors(cudaGetLastError());
} else {
glen = G.n_cols;
for (int i = 0; (1 << i) <= G.n_cols; i += 8) {
blen = MIN(256, glen);
glen = ((glen - 1) >> 8) + 1;
dim3 blockSize(blen, 1, 1);
dim3 gridSize(glen, G.n_rows, 1);
GPU_hist_rowsum<<<gridSize, blockSize, sizeof(float) << 8>>>(G.d_pixels, G.n_rows, G.n_cols, i, blen, weighted ? 1 : 0);
checkCudaErrors(cudaGetLastError());
}
H.create(G.n_rows);
checkCudaErrors(cudaMemcpy(H.d_pixels, G.d_pixels, sizeof(float) * G.n_rows, cudaMemcpyDeviceToDevice));
}
}
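// gpu_centroid: build the row-sum and column-sum profiles of F with gpu_hist,
// then divide the index-weighted sum of each profile by its plain sum to get
// the (x, y) center of mass.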
void gpu_centroid(const gcube &F, double &x, double &y) { // make more efficient later on by getting rid of the ENTIRE copies of F
gcube V, wV, H, wH;
gpu_hist(F, V, true, false);
gpu_hist(F, wV, true, false);
gpu_hist(F, H, false, false);
gpu_hist(F, wH, false, false);
// calculate the centroids
gcube t, w;
gpu_hist(V, t, false, false);
gpu_hist(wV, w, false, true);
y = w.arma_cube()(0, 0, 0) / t.arma_cube()(0, 0, 0);
gpu_hist(H, t, false, false);
gpu_hist(wH, w, false, true);
x = w.arma_cube()(0, 0, 0) / t.arma_cube()(0, 0, 0);
}
|
ebeb1f96d8e3d2a476b574cc7c80b3fc1bab629e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void GreaterThan(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
xb[idxb*N+i] = xf[(idxf-1)*N+i] > xf[(idxf-2)*N+i];
}
return;
} | ebeb1f96d8e3d2a476b574cc7c80b3fc1bab629e.cu | #include "includes.h"
__global__ void GreaterThan(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
{
xb[idxb*N+i] = xf[(idxf-1)*N+i] > xf[(idxf-2)*N+i];
}
return;
} |
072074cf7d693354ccf58b49dccfd66f3366141e.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <thread>
#include <vector>
#include <deque>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#define CUDA_NUM_THREADS 64
#define GET_CUDA_CHANNEL(N) ceil(512.0f / N)
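// The vertices form a tree described by sorted_parent_index / sorted_child_index
// in topological order (parents before children, vertex 0 as root).
// leaf_root_aggr_kernel sweeps bottom-up, adding each child's edge-weighted
// aggregate to its parent; root_leaf_prop_kernel sweeps top-down, blending each
// vertex with its parent as out = in*(1 - w^2) + out_parent*w; root_leaf_grad_kernel
// is the matching backward pass. This appears to be a learnable tree-filter style op.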
__global__ void root_leaf_prop_kernel(
float * in_data,
float * out_data,
float * weight,
int * sorted_index,
int * sorted_parent_index,
int batch_size,
int channel_size,
int vertex_count){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
in_data += batch_idx * vertex_count * channel_size;
out_data += batch_idx * vertex_count * channel_size;
weight += batch_idx * vertex_count;
sorted_index += batch_idx * vertex_count;
sorted_parent_index += batch_idx * vertex_count;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = -1;
if (thread_idx == 0){
weight[0] = 0;
sorted_parent_index[0] = 0;
}
__syncthreads();
int i = thread_idx;
while (i < vertex_count){
int par = sorted_parent_index[i];
int par_thread = par % thread_count;
if ((node_per_thread[par_thread] >= par) || (i == 0)){
int cur_pos = sorted_index[i];
int par_pos = sorted_index[par];
for (int k = channel_idx * vertex_count; k < channel_size * vertex_count;
k += channel_step * vertex_count){
float edge_weight = weight[i];
out_data[cur_pos + k] = in_data[i + k] * (1 - edge_weight * edge_weight) +
out_data[par_pos + k] * edge_weight;
__threadfence_block();
}
node_per_thread[thread_idx] = i;
i += thread_count;
}
__syncthreads();
}
}
__global__ void leaf_root_aggr_kernel(
float * in_data,
float * out_data,
float * weight,
int * sorted_index,
int * sorted_child_index,
int batch_size,
int channel_size,
int vertex_count,
int max_adj_per_node){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
if (in_data != NULL){
in_data += batch_idx * vertex_count * channel_size;
}
out_data += batch_idx * vertex_count * channel_size;
weight += batch_idx * vertex_count;
sorted_index += batch_idx * vertex_count;
sorted_child_index += batch_idx * vertex_count * max_adj_per_node;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = vertex_count;
__syncthreads();
int i = vertex_count - thread_idx - 1;
while (i >= 0){
int child_len = 0;
bool valid = true;
for (int j = 0; j < max_adj_per_node; j++){
int child = sorted_child_index[i * max_adj_per_node + j];
int child_thread = (vertex_count - child - 1) % thread_count;
if (child <= 0) break;
if (node_per_thread[child_thread] > child){
valid = false;
break;
}
child_len++;
}
if (valid){
int cur_pos = sorted_index[i];
for (int k = channel_idx * vertex_count; k < channel_size * vertex_count;
k += channel_step * vertex_count){
float aggr_sum;
if (in_data != NULL)
aggr_sum = in_data[cur_pos + k];
else
aggr_sum = 1;
for (int j = 0; j < child_len; j++){
int child = sorted_child_index[i * max_adj_per_node + j];
aggr_sum += out_data[child + k] * weight[child];
}
out_data[i + k] = aggr_sum;
}
node_per_thread[thread_idx] = i;
i -= thread_count;
}
__syncthreads();
}
}
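// Backward counterpart of the two sweeps above: while re-propagating out_grad
// from parent to child, it also accumulates into grad the per-channel
// contribution of each edge weight (summed over channels later on the host).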
__global__ void root_leaf_grad_kernel(
float * in_data,
float * in_grad,
float * out_data,
float * out_grad,
float * weight,
float * grad,
int * sorted_index,
int * sorted_parent_index,
int batch_size,
int data_channel_size,
int grad_channel_size,
int vertex_count){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
const int channel_size = data_channel_size > grad_channel_size ? data_channel_size : grad_channel_size;
in_data += batch_idx * vertex_count * data_channel_size;
in_grad += batch_idx * vertex_count * grad_channel_size;
out_data += batch_idx * vertex_count * data_channel_size;
out_grad += batch_idx * vertex_count * grad_channel_size;
weight += batch_idx * vertex_count;
grad += batch_idx * vertex_count * channel_size;
sorted_index += batch_idx * vertex_count;
sorted_parent_index += batch_idx * vertex_count;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = -1;
int i = thread_idx;
while (i < vertex_count){
int cur = i;
int par = sorted_parent_index[i];
int par_pos = sorted_index[par];
int par_thread = par % thread_count;
if ((cur == 0) || (node_per_thread[par_thread] >= par)){
for (int k = channel_idx; k < channel_size; k += channel_step){
float edge_weight = weight[i];
int data_offset = (k % data_channel_size) * vertex_count;
int grad_offset = (k % grad_channel_size) * vertex_count;
int out_offset = k * vertex_count;
if (cur > 0){
float left = in_grad[cur + grad_offset] * (out_data[par_pos + data_offset] - edge_weight * in_data[cur + data_offset]);
float right = in_data[cur + data_offset] * (out_grad[par + grad_offset] - edge_weight * in_grad[cur + grad_offset]);
grad[cur + out_offset] = left + right;
out_grad[cur + grad_offset] = in_grad[cur + grad_offset] * (1 - edge_weight * edge_weight) +
out_grad[par + grad_offset] * edge_weight;
__threadfence_block();
}
else
grad[cur + out_offset] = 0;
}
node_per_thread[thread_idx] = i;
i += thread_count;
}
__syncthreads();
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor>
refine_forward(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor
){
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
auto options = feature_in_tensor.options();
auto feature_aggr_tensor = at::zeros_like(feature_in_tensor, options);
auto feature_aggr_up_tensor = at::zeros_like(feature_in_tensor, options);
auto weight_sum_tensor = at::zeros({batch_size, vertex_size}, options);
auto weight_sum_up_tensor = at::zeros({batch_size, vertex_size}, options);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
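// Two sweeps per quantity: aggregate leaf-to-root, then propagate root-to-leaf.
// The pair of launches below runs once on the features and once with
// in_data == NULL (aggregating a constant 1 per vertex) to build the
// normalizing weight sums used for feature_out = feature_aggr / weight_sum.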
hipLaunchKernelGGL(( leaf_root_aggr_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
feature_in, feature_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
hipLaunchKernelGGL(( root_leaf_prop_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
feature_aggr_sum, feature_aggr, edge_weight, sorted_index, sorted_parent_index, batch_size, channel_size, vertex_size);
dim3 weight_block_dims(CUDA_NUM_THREADS, 1, 1), weight_grid_dims(batch_size, 1, 1);
hipLaunchKernelGGL(( leaf_root_aggr_kernel) , dim3(weight_grid_dims), dim3(weight_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
NULL, weight_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, 1, vertex_size, max_adj_per_node);
hipLaunchKernelGGL(( root_leaf_prop_kernel) , dim3(weight_grid_dims), dim3(weight_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
weight_aggr_sum, weight_sum, edge_weight, sorted_index, sorted_parent_index, batch_size, 1, vertex_size);
auto feature_out_tensor = feature_aggr_tensor / weight_sum_tensor.unsqueeze(1);
auto result = std::make_tuple(feature_out_tensor, feature_aggr_tensor, feature_aggr_up_tensor,
weight_sum_tensor, weight_sum_up_tensor);
return result;
}
at::Tensor refine_backward_feature(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor,
const at::Tensor & feature_out_tensor,
const at::Tensor & feature_aggr_tensor,
const at::Tensor & feature_aggr_up_tensor,
const at::Tensor & weight_sum_tensor,
const at::Tensor & weight_sum_up_tensor,
const at::Tensor & grad_out_tensor
){
auto options = feature_in_tensor.options();
auto grad_feature_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_feature_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_out_norm_tensor = grad_out_tensor / weight_sum_tensor.unsqueeze(1);
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
float * grad_out = grad_out_tensor.contiguous().data<float>();
float * grad_feature = grad_feature_tensor.contiguous().data<float>();
float * grad_out_norm = grad_out_norm_tensor.contiguous().data<float>();
float * grad_feature_aggr_sum = grad_feature_aggr_sum_tensor.contiguous().data<float>();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
hipLaunchKernelGGL(( leaf_root_aggr_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
grad_out_norm, grad_feature_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
hipLaunchKernelGGL(( root_leaf_prop_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
grad_feature_aggr_sum, grad_feature, edge_weight, sorted_index, sorted_parent_index, batch_size, channel_size, vertex_size);
return grad_feature_tensor;
}
at::Tensor refine_backward_weight(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor,
const at::Tensor & feature_out_tensor,
const at::Tensor & feature_aggr_tensor,
const at::Tensor & feature_aggr_up_tensor,
const at::Tensor & weight_sum_tensor,
const at::Tensor & weight_sum_up_tensor,
const at::Tensor & grad_out_tensor
){
auto options = feature_in_tensor.options();
auto grad_weight_tensor = at::zeros_like(edge_weight_tensor, options);
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_out = feature_out_tensor.contiguous().data<float>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
float * grad_out = grad_out_tensor.contiguous().data<float>();
float * grad_weight = grad_weight_tensor.contiguous().data<float>();
auto grad_all_channel_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_norm_all_channel_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_out_norm_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
auto feature_grad_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
float * grad_all_channel = grad_all_channel_tensor.contiguous().data<float>();
float * grad_norm_all_channel = grad_norm_all_channel_tensor.contiguous().data<float>();
float * grad_out_norm_aggr_sum = grad_out_norm_aggr_sum_tensor.contiguous().data<float>();
float * feature_grad_aggr_sum = feature_grad_aggr_sum_tensor.contiguous().data<float>();
auto grad_out_norm_tensor = grad_out_tensor / weight_sum_tensor.unsqueeze(1);
auto feature_grad_tensor = grad_out_norm_tensor * feature_out_tensor;
float * grad_out_norm = grad_out_norm_tensor.contiguous().data<float>();
float * feature_grad = feature_grad_tensor.contiguous().data<float>();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
hipLaunchKernelGGL(( leaf_root_aggr_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
grad_out_norm, grad_out_norm_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
hipLaunchKernelGGL(( leaf_root_aggr_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
feature_grad, feature_grad_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
hipLaunchKernelGGL(( root_leaf_grad_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
feature_aggr_sum, grad_out_norm_aggr_sum, feature_aggr, grad_out_norm_aggr_sum, edge_weight, grad_all_channel,
sorted_index, sorted_parent_index, batch_size, channel_size, channel_size, vertex_size);
hipLaunchKernelGGL(( root_leaf_grad_kernel) , dim3(feature_grid_dims), dim3(feature_block_dims), sizeof(int) * CUDA_NUM_THREADS, stream ,
weight_aggr_sum, feature_grad_aggr_sum, weight_sum, feature_grad_aggr_sum, edge_weight, grad_norm_all_channel,
sorted_index, sorted_parent_index, batch_size, 1, channel_size, vertex_size);
grad_weight_tensor = (grad_all_channel_tensor - grad_norm_all_channel_tensor).sum(1);
return grad_weight_tensor;
}
| 072074cf7d693354ccf58b49dccfd66f3366141e.cu | #include <math.h>
#include <thread>
#include <vector>
#include <deque>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#define CUDA_NUM_THREADS 64
#define GET_CUDA_CHANNEL(N) ceil(512.0f / N)
__global__ void root_leaf_prop_kernel(
float * in_data,
float * out_data,
float * weight,
int * sorted_index,
int * sorted_parent_index,
int batch_size,
int channel_size,
int vertex_count){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
in_data += batch_idx * vertex_count * channel_size;
out_data += batch_idx * vertex_count * channel_size;
weight += batch_idx * vertex_count;
sorted_index += batch_idx * vertex_count;
sorted_parent_index += batch_idx * vertex_count;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = -1;
if (thread_idx == 0){
weight[0] = 0;
sorted_parent_index[0] = 0;
}
__syncthreads();
int i = thread_idx;
while (i < vertex_count){
int par = sorted_parent_index[i];
int par_thread = par % thread_count;
if ((node_per_thread[par_thread] >= par) || (i == 0)){
int cur_pos = sorted_index[i];
int par_pos = sorted_index[par];
for (int k = channel_idx * vertex_count; k < channel_size * vertex_count;
k += channel_step * vertex_count){
float edge_weight = weight[i];
out_data[cur_pos + k] = in_data[i + k] * (1 - edge_weight * edge_weight) +
out_data[par_pos + k] * edge_weight;
__threadfence_block();
}
node_per_thread[thread_idx] = i;
i += thread_count;
}
__syncthreads();
}
}
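// leaf_root_aggr_kernel: bottom-up sweep; each vertex adds the edge-weighted,
// already-aggregated values of its children to its own input (in_data == NULL
// means "aggregate a constant 1", which the host uses to build weight sums).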
__global__ void leaf_root_aggr_kernel(
float * in_data,
float * out_data,
float * weight,
int * sorted_index,
int * sorted_child_index,
int batch_size,
int channel_size,
int vertex_count,
int max_adj_per_node){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
if (in_data != NULL){
in_data += batch_idx * vertex_count * channel_size;
}
out_data += batch_idx * vertex_count * channel_size;
weight += batch_idx * vertex_count;
sorted_index += batch_idx * vertex_count;
sorted_child_index += batch_idx * vertex_count * max_adj_per_node;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = vertex_count;
__syncthreads();
int i = vertex_count - thread_idx - 1;
while (i >= 0){
int child_len = 0;
bool valid = true;
for (int j = 0; j < max_adj_per_node; j++){
int child = sorted_child_index[i * max_adj_per_node + j];
int child_thread = (vertex_count - child - 1) % thread_count;
if (child <= 0) break;
if (node_per_thread[child_thread] > child){
valid = false;
break;
}
child_len++;
}
if (valid){
int cur_pos = sorted_index[i];
for (int k = channel_idx * vertex_count; k < channel_size * vertex_count;
k += channel_step * vertex_count){
float aggr_sum;
if (in_data != NULL)
aggr_sum = in_data[cur_pos + k];
else
aggr_sum = 1;
for (int j = 0; j < child_len; j++){
int child = sorted_child_index[i * max_adj_per_node + j];
aggr_sum += out_data[child + k] * weight[child];
}
out_data[i + k] = aggr_sum;
}
node_per_thread[thread_idx] = i;
i -= thread_count;
}
__syncthreads();
}
}
__global__ void root_leaf_grad_kernel(
float * in_data,
float * in_grad,
float * out_data,
float * out_grad,
float * weight,
float * grad,
int * sorted_index,
int * sorted_parent_index,
int batch_size,
int data_channel_size,
int grad_channel_size,
int vertex_count){
const int thread_idx = threadIdx.x;
const int batch_idx = blockIdx.x;
const int channel_idx = blockIdx.y;
const int thread_count = blockDim.x;
const int channel_step = gridDim.y;
const int channel_size = data_channel_size > grad_channel_size ? data_channel_size : grad_channel_size;
in_data += batch_idx * vertex_count * data_channel_size;
in_grad += batch_idx * vertex_count * grad_channel_size;
out_data += batch_idx * vertex_count * data_channel_size;
out_grad += batch_idx * vertex_count * grad_channel_size;
weight += batch_idx * vertex_count;
grad += batch_idx * vertex_count * channel_size;
sorted_index += batch_idx * vertex_count;
sorted_parent_index += batch_idx * vertex_count;
__shared__ int node_per_thread[CUDA_NUM_THREADS];
node_per_thread[thread_idx] = -1;
int i = thread_idx;
while (i < vertex_count){
int cur = i;
int par = sorted_parent_index[i];
int par_pos = sorted_index[par];
int par_thread = par % thread_count;
if ((cur == 0) || (node_per_thread[par_thread] >= par)){
for (int k = channel_idx; k < channel_size; k += channel_step){
float edge_weight = weight[i];
int data_offset = (k % data_channel_size) * vertex_count;
int grad_offset = (k % grad_channel_size) * vertex_count;
int out_offset = k * vertex_count;
if (cur > 0){
float left = in_grad[cur + grad_offset] * (out_data[par_pos + data_offset] - edge_weight * in_data[cur + data_offset]);
float right = in_data[cur + data_offset] * (out_grad[par + grad_offset] - edge_weight * in_grad[cur + grad_offset]);
grad[cur + out_offset] = left + right;
out_grad[cur + grad_offset] = in_grad[cur + grad_offset] * (1 - edge_weight * edge_weight) +
out_grad[par + grad_offset] * edge_weight;
__threadfence_block();
}
else
grad[cur + out_offset] = 0;
}
node_per_thread[thread_idx] = i;
i += thread_count;
}
__syncthreads();
}
}
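// Forward-pass host wrapper: aggregates leaf-to-root and then propagates root-to-leaf, once
// for the input features and once for an implicit all-ones input (giving the per-vertex
// weight normalization), then divides the two results.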
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor>
refine_forward(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor
){
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
auto options = feature_in_tensor.options();
auto feature_aggr_tensor = at::zeros_like(feature_in_tensor, options);
auto feature_aggr_up_tensor = at::zeros_like(feature_in_tensor, options);
auto weight_sum_tensor = at::zeros({batch_size, vertex_size}, options);
auto weight_sum_up_tensor = at::zeros({batch_size, vertex_size}, options);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
leaf_root_aggr_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
feature_in, feature_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
root_leaf_prop_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
feature_aggr_sum, feature_aggr, edge_weight, sorted_index, sorted_parent_index, batch_size, channel_size, vertex_size);
dim3 weight_block_dims(CUDA_NUM_THREADS, 1, 1), weight_grid_dims(batch_size, 1, 1);
leaf_root_aggr_kernel <<< weight_grid_dims, weight_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
NULL, weight_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, 1, vertex_size, max_adj_per_node);
root_leaf_prop_kernel <<< weight_grid_dims, weight_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
weight_aggr_sum, weight_sum, edge_weight, sorted_index, sorted_parent_index, batch_size, 1, vertex_size);
auto feature_out_tensor = feature_aggr_tensor / weight_sum_tensor.unsqueeze(1);
auto result = std::make_tuple(feature_out_tensor, feature_aggr_tensor, feature_aggr_up_tensor,
weight_sum_tensor, weight_sum_up_tensor);
return result;
}
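// Backward pass w.r.t. the input features: the incoming gradient is divided by the weight
// sum and pushed back through the same aggregate-then-propagate tree traversal.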
at::Tensor refine_backward_feature(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor,
const at::Tensor & feature_out_tensor,
const at::Tensor & feature_aggr_tensor,
const at::Tensor & feature_aggr_up_tensor,
const at::Tensor & weight_sum_tensor,
const at::Tensor & weight_sum_up_tensor,
const at::Tensor & grad_out_tensor
){
auto options = feature_in_tensor.options();
auto grad_feature_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_feature_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_out_norm_tensor = grad_out_tensor / weight_sum_tensor.unsqueeze(1);
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
float * grad_out = grad_out_tensor.contiguous().data<float>();
float * grad_feature = grad_feature_tensor.contiguous().data<float>();
float * grad_out_norm = grad_out_norm_tensor.contiguous().data<float>();
float * grad_feature_aggr_sum = grad_feature_aggr_sum_tensor.contiguous().data<float>();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
leaf_root_aggr_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
grad_out_norm, grad_feature_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
root_leaf_prop_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
grad_feature_aggr_sum, grad_feature, edge_weight, sorted_index, sorted_parent_index, batch_size, channel_size, vertex_size);
return grad_feature_tensor;
}
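// Backward pass w.r.t. the edge weights: root_leaf_grad_kernel produces the feature-path and
// normalization-path contributions, whose difference is summed over the channel dimension.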
at::Tensor refine_backward_weight(
const at::Tensor & feature_in_tensor,
const at::Tensor & edge_weight_tensor,
const at::Tensor & sorted_index_tensor,
const at::Tensor & sorted_parent_tensor,
const at::Tensor & sorted_child_tensor,
const at::Tensor & feature_out_tensor,
const at::Tensor & feature_aggr_tensor,
const at::Tensor & feature_aggr_up_tensor,
const at::Tensor & weight_sum_tensor,
const at::Tensor & weight_sum_up_tensor,
const at::Tensor & grad_out_tensor
){
auto options = feature_in_tensor.options();
auto grad_weight_tensor = at::zeros_like(edge_weight_tensor, options);
const int batch_size = feature_in_tensor.size(0);
const int channel_size = feature_in_tensor.size(1);
const int vertex_size = feature_in_tensor.size(2);
const int max_adj_per_node = sorted_child_tensor.size(2);
float * feature_in = feature_in_tensor.contiguous().data<float>();
float * edge_weight = edge_weight_tensor.contiguous().data<float>();
int * sorted_index = sorted_index_tensor.contiguous().data<int>();
int * sorted_parent_index = sorted_parent_tensor.contiguous().data<int>();
int * sorted_child_index = sorted_child_tensor.contiguous().data<int>();
float * feature_out = feature_out_tensor.contiguous().data<float>();
float * feature_aggr = feature_aggr_tensor.contiguous().data<float>();
float * feature_aggr_sum = feature_aggr_up_tensor.contiguous().data<float>();
float * weight_sum = weight_sum_tensor.contiguous().data<float>();
float * weight_aggr_sum = weight_sum_up_tensor.contiguous().data<float>();
float * grad_out = grad_out_tensor.contiguous().data<float>();
float * grad_weight = grad_weight_tensor.contiguous().data<float>();
auto grad_all_channel_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_norm_all_channel_tensor = at::zeros_like(feature_in_tensor, options);
auto grad_out_norm_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
auto feature_grad_aggr_sum_tensor = at::zeros_like(feature_in_tensor, options);
float * grad_all_channel = grad_all_channel_tensor.contiguous().data<float>();
float * grad_norm_all_channel = grad_norm_all_channel_tensor.contiguous().data<float>();
float * grad_out_norm_aggr_sum = grad_out_norm_aggr_sum_tensor.contiguous().data<float>();
float * feature_grad_aggr_sum = feature_grad_aggr_sum_tensor.contiguous().data<float>();
auto grad_out_norm_tensor = grad_out_tensor / weight_sum_tensor.unsqueeze(1);
auto feature_grad_tensor = grad_out_norm_tensor * feature_out_tensor;
float * grad_out_norm = grad_out_norm_tensor.contiguous().data<float>();
float * feature_grad = feature_grad_tensor.contiguous().data<float>();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 feature_block_dims(CUDA_NUM_THREADS, 1, 1), feature_grid_dims(batch_size, channel_size, 1);
leaf_root_aggr_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
grad_out_norm, grad_out_norm_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
leaf_root_aggr_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
feature_grad, feature_grad_aggr_sum, edge_weight, sorted_index, sorted_child_index, batch_size, channel_size, vertex_size, max_adj_per_node);
root_leaf_grad_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
feature_aggr_sum, grad_out_norm_aggr_sum, feature_aggr, grad_out_norm_aggr_sum, edge_weight, grad_all_channel,
sorted_index, sorted_parent_index, batch_size, channel_size, channel_size, vertex_size);
root_leaf_grad_kernel <<< feature_grid_dims, feature_block_dims, sizeof(int) * CUDA_NUM_THREADS, stream >>>(
weight_aggr_sum, feature_grad_aggr_sum, weight_sum, feature_grad_aggr_sum, edge_weight, grad_norm_all_channel,
sorted_index, sorted_parent_index, batch_size, 1, channel_size, vertex_size);
grad_weight_tensor = (grad_all_channel_tensor - grad_norm_all_channel_tensor).sum(1);
return grad_weight_tensor;
}
|
0b724b90f946761d92243953c700904084a4bd54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
if (comp < (-1.3332E2f * (var_1 + +1.9777E-37f + -1.7981E26f / (var_2 - var_3)))) {
comp = (var_4 / +0.0f / atan2f(+1.1750E-37f, -1.5602E-26f / (+0.0f / (+1.5070E-42f / -1.2048E-36f / var_5))));
float tmp_1 = atanf((var_6 + var_7));
comp += tmp_1 + -1.6271E35f * (-1.0488E-37f / (-1.5361E-41f * var_8 / var_9));
comp += powf(var_10 + -0.0f, (+0.0f / (var_11 * (-1.1546E-26f / +1.1412E-42f))));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
hipDeviceSynchronize();
return 0;
}
| 0b724b90f946761d92243953c700904084a4bd54.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
if (comp < (-1.3332E2f * (var_1 + +1.9777E-37f + -1.7981E26f / (var_2 - var_3)))) {
comp = (var_4 / +0.0f / atan2f(+1.1750E-37f, -1.5602E-26f / (+0.0f / (+1.5070E-42f / -1.2048E-36f / var_5))));
float tmp_1 = atanf((var_6 + var_7));
comp += tmp_1 + -1.6271E35f * (-1.0488E-37f / (-1.5361E-41f * var_8 / var_9));
comp += powf(var_10 + -0.0f, (+0.0f / (var_11 * (-1.1546E-26f / +1.1412E-42f))));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
cudaDeviceSynchronize();
return 0;
}
|
ffa9f46b40e647873d1f11ef9d031c29d70bd8a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <IL/il.h>
using namespace std;
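// 3x3 Sobel edge detector on interleaved RGB data: one thread per interior pixel, writing
// sqrt(Gx^2 + Gy^2) per channel, clamped to 255.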
__global__ void sobel(unsigned char *data,unsigned char *out,size_t rows,size_t cols){
auto idx = blockIdx.x * blockDim.x + threadIdx.x;
auto idy = blockIdx.y * blockDim.y + threadIdx.y;
int h,v,res;
if( idx > 0 && idx < rows-1 && idy > 0 && idy < cols-1){
for(int c = 0 ; c < 3 ; ++c) {
// Horizontal
h = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx - 1) * cols + idy + 1) * 3 + c]
+ 2 * data[( idx * cols + idy - 1) * 3 + c] - 2 * data[( idx * cols + idy + 1) * 3 + c]
+ data[((idx + 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
// Vertical
v = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy - 1) * 3 + c]
+ 2 * data[((idx - 1) * cols + idy ) * 3 + c] - 2 * data[((idx + 1) * cols + idy ) * 3 + c]
+ data[((idx - 1) * cols + idy + 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
res = h*h + v*v;
res = res > 255*255 ? 255*255 : res;
out[(idx * cols + idy) * 3 + c] =sqrtf(res);
}
}
}
int main() {
unsigned int image;
ilInit();
ilGenImages(1, &image);
ilBindImage(image);
ilLoadImage("4v9mo.jpg");
auto cols = ilGetInteger(IL_IMAGE_WIDTH);
auto rows = ilGetInteger(IL_IMAGE_HEIGHT);
auto bpp = ilGetInteger(IL_IMAGE_BYTES_PER_PIXEL);
auto size_img = cols * rows * bpp;
// Retrieve the image data
unsigned char* data = ilGetData();
// Force the data into a contiguous region of memory (pinned host buffer).
hipHostRegister( data, size_img, hipHostRegisterDefault);
// Process the image
unsigned char* out = (unsigned char*)malloc(size_img);
unsigned char* out_d;
unsigned char* data_d;
hipError_t err = hipMalloc( &out_d, size_img );
if( err != hipSuccess ) {
cerr << "Error: " << hipGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
err = hipMalloc(&data_d,size_img+(2*cols*bpp));
if( err != hipSuccess ) {
cerr << "Error: " << hipGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
// Using streams:
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
hipStream_t streams[2];
for( std::size_t i = 0 ; i < 2 ; ++i ){
hipStreamCreate(&streams[ i ] );
}
/*****************************************/
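// Copy the two image halves to the device asynchronously, one stream per half; each
// transfer carries one extra row so the 3x3 stencil has data across the seam.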
auto size = bpp*cols;
auto offset = 0;
for( size_t i = 0 ; i < 2 ; ++i ){
if(i == 1){
offset = bpp*cols;
}
hipMemcpyAsync(data_d + i*size_img/2+offset , data + i*size_img/2-offset , size_img/2+size, hipMemcpyHostToDevice, streams[i] );
/*hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess) {
cerr << "Error: " << hipGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}*/
}
/****************************************/
// Launch the kernel on the streams.
dim3 t( 32, 32 );
//int rows_bis[2]; rows_bis[0] = rows int cols, rows, bpp;_bis[1] = rows/2;
dim3 b( ( rows - 1) / (t.x) + 1 , ( cols - 1 ) / (t.y) + 1 );
for(size_t i=0; i<2; i++){
hipLaunchKernelGGL(( sobel), dim3(b), dim3(t), 0,streams[i] , data_d + i*size_img/2, out_d + i*size_img/2, rows/2+1, cols );
/*hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess){
std::cout << "Error kernel : " << hipGetErrorString(err) << std::endl;
exit(EXIT_FAILURE);
}*/
}
size = 0;
for( size_t i = 0 ; i < 2 ; ++i ){
if(i == 1 && size_img%2 != 0){
size = 1;
}
hipMemcpyAsync(out + i*size_img/2, out_d + i*size_img/2 ,size_img/2+size, hipMemcpyDeviceToHost, streams[i] );
/*hipDeviceSynchronize();
err = hipGetLastError();
if( err != hipSuccess ) {
cerr << "Error cudaMemcpyAsyncDeviceToHost: " << hipGetErrorString(err)<< endl;
exit(EXIT_FAILURE);
}*/
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
cout << "time: " << duration << "ms\n";
// Place the processed data back into the image
ilSetData(out);
// Save the image
ilEnable(IL_FILE_OVERWRITE);
ilSaveImage("out.jpg");
ilDeleteImages(1, &image);
free(out);
hipFree(out_d);
hipFree(data_d);
return 0;
}
| ffa9f46b40e647873d1f11ef9d031c29d70bd8a7.cu | #include <iostream>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <IL/il.h>
using namespace std;
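// 3x3 Sobel edge detector on interleaved RGB data: one thread per interior pixel, writing
// sqrt(Gx^2 + Gy^2) per channel, clamped to 255.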
__global__ void sobel(unsigned char *data,unsigned char *out,size_t rows,size_t cols){
auto idx = blockIdx.x * blockDim.x + threadIdx.x;
auto idy = blockIdx.y * blockDim.y + threadIdx.y;
int h,v,res;
if( idx > 0 && idx < rows-1 && idy > 0 && idy < cols-1){
for(int c = 0 ; c < 3 ; ++c) {
// Horizontal
h = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx - 1) * cols + idy + 1) * 3 + c]
+ 2 * data[( idx * cols + idy - 1) * 3 + c] - 2 * data[( idx * cols + idy + 1) * 3 + c]
+ data[((idx + 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
// Vertical
v = data[((idx - 1) * cols + idy - 1) * 3 + c] - data[((idx + 1) * cols + idy - 1) * 3 + c]
+ 2 * data[((idx - 1) * cols + idy ) * 3 + c] - 2 * data[((idx + 1) * cols + idy ) * 3 + c]
+ data[((idx - 1) * cols + idy + 1) * 3 + c] - data[((idx + 1) * cols + idy + 1) * 3 + c];
res = h*h + v*v;
res = res > 255*255 ? 255*255 : res;
out[(idx * cols + idy) * 3 + c] =sqrtf(res);
}
}
}
int main() {
unsigned int image;
ilInit();
ilGenImages(1, &image);
ilBindImage(image);
ilLoadImage("4v9mo.jpg");
auto cols = ilGetInteger(IL_IMAGE_WIDTH);
auto rows = ilGetInteger(IL_IMAGE_HEIGHT);
auto bpp = ilGetInteger(IL_IMAGE_BYTES_PER_PIXEL);
auto size_img = cols * rows * bpp;
// Retrieve the image data
unsigned char* data = ilGetData();
// Force the data into a contiguous region of memory (pinned host buffer).
cudaHostRegister( data, size_img, cudaHostRegisterDefault);
// Process the image
unsigned char* out = (unsigned char*)malloc(size_img);
unsigned char* out_d;
unsigned char* data_d;
cudaError_t err = cudaMalloc( &out_d, size_img );
if( err != cudaSuccess ) {
cerr << "Error: " << cudaGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
err = cudaMalloc(&data_d,size_img+(2*cols*bpp));
if( err != cudaSuccess ) {
cerr << "Error: " << cudaGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}
// Using streams:
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
cudaStream_t streams[2];
for( std::size_t i = 0 ; i < 2 ; ++i ){
cudaStreamCreate(&streams[ i ] );
}
/*****************************************/
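// Copy the two image halves to the device asynchronously, one stream per half; each
// transfer carries one extra row so the 3x3 stencil has data across the seam.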
auto size = bpp*cols;
auto offset = 0;
for( size_t i = 0 ; i < 2 ; ++i ){
if(i == 1){
offset = bpp*cols;
}
cudaMemcpyAsync(data_d + i*size_img/2+offset , data + i*size_img/2-offset , size_img/2+size, cudaMemcpyHostToDevice, streams[i] );
/*cudaDeviceSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess) {
cerr << "Error: " << cudaGetErrorString(err) << endl;
exit(EXIT_FAILURE);
}*/
}
/****************************************/
// Launch the kernel on the streams.
dim3 t( 32, 32 );
//int rows_bis[2]; rows_bis[0] = rows int cols, rows, bpp;_bis[1] = rows/2;
dim3 b( ( rows - 1) / (t.x) + 1 , ( cols - 1 ) / (t.y) + 1 );
for(size_t i=0; i<2; i++){
sobel<<< b, t, 0,streams[i] >>>( data_d + i*size_img/2, out_d + i*size_img/2, rows/2+1, cols );
/*cudaDeviceSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess){
std::cout << "Error kernel : " << cudaGetErrorString(err) << std::endl;
exit(EXIT_FAILURE);
}*/
}
size = 0;
for( size_t i = 0 ; i < 2 ; ++i ){
if(i == 1 && size_img%2 != 0){
size = 1;
}
cudaMemcpyAsync(out + i*size_img/2, out_d + i*size_img/2 ,size_img/2+size, cudaMemcpyDeviceToHost, streams[i] );
/*cudaDeviceSynchronize();
err = cudaGetLastError();
if( err != cudaSuccess ) {
cerr << "Error cudaMemcpyAsyncDeviceToHost: " << cudaGetErrorString(err)<< endl;
exit(EXIT_FAILURE);
}*/
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
cout << "time: " << duration << "ms\n";
// Place the processed data back into the image
ilSetData(out);
// Save the image
ilEnable(IL_FILE_OVERWRITE);
ilSaveImage("out.jpg");
ilDeleteImages(1, &image);
free(out);
cudaFree(out_d);
cudaFree(data_d);
return 0;
}
|
966278e82d168d38f33101809dd37c88dd019218.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mm.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
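// Candidate block dimensions and matrix sizes; main() times 1000 launches of the mm kernel
// for each selected matrix size and block configuration after a 10-launch warm-up.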
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dA = NULL;
hipMalloc(&dA, XSIZE*YSIZE);
float *dB = NULL;
hipMalloc(&dB, XSIZE*YSIZE);
float *dC = NULL;
hipMalloc(&dC, XSIZE*YSIZE);
int DIM = 2;
int N = XSIZE*YSIZE;
int GPUN = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(mm, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(mm, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(mm, dim3(gridBlock), dim3(threadBlock), 0, 0, dA, dB, dC, DIM, N, GPUN);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 966278e82d168d38f33101809dd37c88dd019218.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
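// Candidate block dimensions and matrix sizes; main() times 1000 launches of the mm kernel
// for each selected matrix size and block configuration after a 10-launch warm-up.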
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dA = NULL;
cudaMalloc(&dA, XSIZE*YSIZE);
float *dB = NULL;
cudaMalloc(&dB, XSIZE*YSIZE);
float *dC = NULL;
cudaMalloc(&dC, XSIZE*YSIZE);
int DIM = 2;
int N = XSIZE*YSIZE;
int GPUN = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mm<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mm<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mm<<<gridBlock,threadBlock>>>(dA,dB,dC,DIM,N,GPUN);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ea6ead9926494555d005d72cfef29394cc97f0bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cstdio"
#include "gpu_interface.h"
#include "mem_controller.h"
#include "vector"
#include "iostream"
#include "global.h"
#include "iostream"
using namespace std;
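// One block per pair of bit-vector lists: threads AND the two vectors chunk by chunk,
// accumulate popcounts in shared memory, store the intersection in dst_list, and reduce to a
// single support count per pair.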
__global__ void kernel_calc( unsigned int** src_list_1,
unsigned int** src_list_2,
unsigned int** dst_list,
unsigned int* result,
unsigned int list_len,
unsigned int vlist_len,
unsigned int old_block_pos) {
__shared__ unsigned int sup[MAX_THREAD];
unsigned int* psrc1;
unsigned int* psrc2;
unsigned int* pdst;
unsigned int iter, i, tmp;
int current_block_pos = blockIdx.x + old_block_pos;
unsigned int bound;
if (current_block_pos >= list_len)
return;
sup[threadIdx.x] = 0;
iter = (vlist_len - 1) / blockDim.x + 1;
psrc1 = src_list_1[current_block_pos];
psrc2 = src_list_2[current_block_pos];
pdst = dst_list[current_block_pos];
__syncthreads();
for (i = 0; i < iter; i++) {
int thread_pos = i * blockDim.x + threadIdx.x;
if (thread_pos >= vlist_len)
break;
tmp=psrc1[thread_pos] & psrc2[thread_pos];
sup[threadIdx.x]+=__popc(tmp);
pdst[thread_pos]=tmp;
}
__syncthreads();
for (bound = blockDim.x / 2; bound > 0; bound >>= 1) {
if (threadIdx.x < bound)
sup[threadIdx.x]+=sup[threadIdx.x+bound];
__syncthreads();
}
__syncthreads();
if(threadIdx.x == 0) {
*(result+current_block_pos)=sup[0];
}
}
void ListUnionGPU::initialize(unsigned int size, unsigned int vlen) {
list_size=size;
list_len=0;
vlist_len=vlen;
src_list_1=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
src_list_2=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
dst_list=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
result=(unsigned int*)malloc(sizeof(unsigned int)*list_size);
return;
}
void ListUnionGPU::add_to_tail(unsigned int* psrc1, unsigned int* psrc2, unsigned int* pdst) {
src_list_1[list_len]=psrc1;
src_list_2[list_len]=psrc2;
dst_list[list_len]=pdst;
list_len++;
return;
}
void ListUnionGPU::support_counting() {
unsigned int ** d_src_list_1, **d_src_list_2, **d_dst_list;
unsigned int * d_result;
int i = 0;
time_t begin, end;
hipError_t err;
err = hipMalloc((void **)&d_src_list_1, sizeof(unsigned int *)*list_len);
if(err != hipSuccess) {
cerr << "error in cuda malloc" << endl;
}
err = hipMalloc((void **)&d_src_list_2, sizeof(unsigned int *)*list_len);
if(err != hipSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = hipMalloc((void **)&d_dst_list, sizeof(unsigned int *)*list_len);
if(err != hipSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = hipMalloc((void **)&d_result, sizeof(unsigned int)*list_len);
if(err != hipSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = hipMemcpy(d_src_list_1, src_list_1, sizeof(unsigned int *)*list_len,
hipMemcpyHostToDevice);
if(err != hipSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = hipMemcpy(d_src_list_2, src_list_2, sizeof(unsigned int *)*list_len,
hipMemcpyHostToDevice);
if(err != hipSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = hipMemcpy(d_dst_list, dst_list, sizeof(unsigned int *)*list_len,
hipMemcpyHostToDevice);
if(err != hipSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
int iter = 0, pos = 0;
iter = (list_len - 1) / MAX_BLOCK + 1;
begin = clock();
for (i = 0; i < iter; i++) {
hipLaunchKernelGGL(( kernel_calc), dim3(MAX_BLOCK), dim3(MAX_THREAD), 0, 0, d_src_list_1,d_src_list_2,d_dst_list,
d_result,list_len,vlist_len,pos);
pos+=MAX_BLOCK;
}
hipDeviceSynchronize();
end = clock();
time_support_counting += (float)(end - begin);
err = hipMemcpy(result, d_result, sizeof(unsigned int)*list_len, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = hipFree(d_src_list_1);
if (err != hipSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = hipFree(d_src_list_2);
if (err != hipSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = hipFree(d_dst_list);
if (err != hipSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = hipFree(d_result);
if (err != hipSuccess) {
cerr<<"error in cuda free"<<endl;
}
}
void ListUnionGPU::clear() {
list_len = 0;
}
void ListUnionGPU::destroy() {
list_size=0;
list_len=0;
vlist_len=0;
free(src_list_1);
free(src_list_2);
free(dst_list);
free(result);
return;
}
unsigned int * d_vlist_generator(int vlist_len, GPUMemPool * gmc) {
unsigned int * d_res,* res;
int i;
hipError_t err;
res=(unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
for (i = 0; i < vlist_len; i++) {
res[i] = rand() % 16;
}
d_res = gmc->alloc();
if (d_res == NULL)
cerr << "GPU memory full"<<endl;
err = hipMemcpy(d_res, res, sizeof(unsigned int) * vlist_len, hipMemcpyHostToDevice);
if (err != 0) {
cerr<<"cuda call error in random generator"<<endl;
}
free(res);
return d_res;
}
void ListUnionGPU::debug(bool verification=false) {
int i, j, k;
unsigned int * h_vlist;
hipError_t err;
int sum = 0;
cerr << "list size : " << list_size << " list len : " << list_len
<< " vlist len : " << vlist_len << endl;
if (verification == false) {
h_vlist = (unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
cerr << "list_size = " << list_size << "," << "list_len = "
<< list_len << "," << "vlist_len = " << vlist_len << endl;
cerr << "lists" << endl;
for (i = 0; i < list_len; i++) {
cerr << "list item " << dec << i << endl;
err = hipMemcpy(h_vlist,src_list_1[i], sizeof(unsigned int)*vlist_len,
hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos >> k) & 0x00000001) << ",";
}
// cerr<<hex<<"("<<cur_pos<<"),";
}
cerr << endl;
err = hipMemcpy(h_vlist,src_list_2[i], sizeof(unsigned int)*vlist_len, hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos>>k) & 0x00000001)<<",";
}
}
cerr << endl;
err = hipMemcpy(h_vlist, dst_list[i], sizeof(unsigned int)*vlist_len,
hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
sum = 0;
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos >> k) & 0x00000001) << ",";
sum += ((cur_pos >> k) & 0x00000001);
}
}
cerr << endl;
cerr << "Results : " << result[i] << endl;
cerr << "Verification : " << sum << endl;
}
cerr << "result: " << endl;
for (i = 0; i < list_len; i++) {
cerr << dec << result[i] << ",";
}
cerr << endl;
free(h_vlist);
} else if (verification == true) {
h_vlist=(unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
for (i = 0; i < list_len; i++) {
err = hipMemcpy(h_vlist, src_list_1[i], sizeof(unsigned int)*vlist_len,
hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
err = hipMemcpy(h_vlist, src_list_2[i], sizeof(unsigned int)*vlist_len,
hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
err = hipMemcpy(h_vlist, dst_list[i], sizeof(unsigned int)*vlist_len,
hipMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
sum = 0 ;
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
sum += ((cur_pos >> k) & 0x00000001);
}
}
if (result[i] != sum) {
cerr << "inconsistent result at " << i << endl;
}
}
free(h_vlist);
}
cerr<<"------------------------finish debugging lug-------------------------\n";
}
| ea6ead9926494555d005d72cfef29394cc97f0bc.cu | #include "cuda.h"
#include "cstdio"
#include "gpu_interface.h"
#include "mem_controller.h"
#include "vector"
#include "iostream"
#include "global.h"
#include "iostream"
using namespace std;
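// One block per pair of bit-vector lists: threads AND the two vectors chunk by chunk,
// accumulate popcounts in shared memory, store the intersection in dst_list, and reduce to a
// single support count per pair.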
__global__ void kernel_calc( unsigned int** src_list_1,
unsigned int** src_list_2,
unsigned int** dst_list,
unsigned int* result,
unsigned int list_len,
unsigned int vlist_len,
unsigned int old_block_pos) {
__shared__ unsigned int sup[MAX_THREAD];
unsigned int* psrc1;
unsigned int* psrc2;
unsigned int* pdst;
unsigned int iter, i, tmp;
int current_block_pos = blockIdx.x + old_block_pos;
unsigned int bound;
if (current_block_pos >= list_len)
return;
sup[threadIdx.x] = 0;
iter = (vlist_len - 1) / blockDim.x + 1;
psrc1 = src_list_1[current_block_pos];
psrc2 = src_list_2[current_block_pos];
pdst = dst_list[current_block_pos];
__syncthreads();
for (i = 0; i < iter; i++) {
int thread_pos = i * blockDim.x + threadIdx.x;
if (thread_pos >= vlist_len)
break;
tmp=psrc1[thread_pos] & psrc2[thread_pos];
sup[threadIdx.x]+=__popc(tmp);
pdst[thread_pos]=tmp;
}
__syncthreads();
for (bound = blockDim.x / 2; bound > 0; bound >>= 1) {
if (threadIdx.x < bound)
sup[threadIdx.x]+=sup[threadIdx.x+bound];
__syncthreads();
}
__syncthreads();
if(threadIdx.x == 0) {
*(result+current_block_pos)=sup[0];
}
}
void ListUnionGPU::initialize(unsigned int size, unsigned int vlen) {
list_size=size;
list_len=0;
vlist_len=vlen;
src_list_1=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
src_list_2=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
dst_list=(unsigned int**)malloc(sizeof(unsigned int *)*list_size);
result=(unsigned int*)malloc(sizeof(unsigned int)*list_size);
return;
}
void ListUnionGPU::add_to_tail(unsigned int* psrc1, unsigned int* psrc2, unsigned int* pdst) {
src_list_1[list_len]=psrc1;
src_list_2[list_len]=psrc2;
dst_list[list_len]=pdst;
list_len++;
return;
}
void ListUnionGPU::support_counting() {
unsigned int ** d_src_list_1, **d_src_list_2, **d_dst_list;
unsigned int * d_result;
int i = 0;
time_t begin, end;
cudaError_t err;
err = cudaMalloc((void **)&d_src_list_1, sizeof(unsigned int *)*list_len);
if(err != cudaSuccess) {
cerr << "error in cuda malloc" << endl;
}
err = cudaMalloc((void **)&d_src_list_2, sizeof(unsigned int *)*list_len);
if(err != cudaSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = cudaMalloc((void **)&d_dst_list, sizeof(unsigned int *)*list_len);
if(err != cudaSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = cudaMalloc((void **)&d_result, sizeof(unsigned int)*list_len);
if(err != cudaSuccess) {
cerr<<"error in cuda malloc"<<endl;
}
err = cudaMemcpy(d_src_list_1, src_list_1, sizeof(unsigned int *)*list_len,
cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = cudaMemcpy(d_src_list_2, src_list_2, sizeof(unsigned int *)*list_len,
cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = cudaMemcpy(d_dst_list, dst_list, sizeof(unsigned int *)*list_len,
cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
int iter = 0, pos = 0;
iter = (list_len - 1) / MAX_BLOCK + 1;
begin = clock();
for (i = 0; i < iter; i++) {
kernel_calc<<<MAX_BLOCK, MAX_THREAD>>>(d_src_list_1,d_src_list_2,d_dst_list,
d_result,list_len,vlist_len,pos);
pos+=MAX_BLOCK;
}
cudaThreadSynchronize();
end = clock();
time_support_counting += (float)(end - begin);
err = cudaMemcpy(result, d_result, sizeof(unsigned int)*list_len, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
cerr<<"error in cuda memcpy"<<endl;
}
err = cudaFree(d_src_list_1);
if (err != cudaSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = cudaFree(d_src_list_2);
if (err != cudaSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = cudaFree(d_dst_list);
if (err != cudaSuccess) {
cerr<<"error in cuda free"<<endl;
}
err = cudaFree(d_result);
if (err != cudaSuccess) {
cerr<<"error in cuda free"<<endl;
}
}
void ListUnionGPU::clear() {
list_len = 0;
}
void ListUnionGPU::destroy() {
list_size=0;
list_len=0;
vlist_len=0;
free(src_list_1);
free(src_list_2);
free(dst_list);
free(result);
return;
}
unsigned int * d_vlist_generator(int vlist_len, GPUMemPool * gmc) {
unsigned int * d_res,* res;
int i;
cudaError_t err;
res=(unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
for (i = 0; i < vlist_len; i++) {
res[i] = rand() % 16;
}
d_res = gmc->alloc();
if (d_res == NULL)
cerr << "GPU memory full"<<endl;
err = cudaMemcpy(d_res, res, sizeof(unsigned int) * vlist_len, cudaMemcpyHostToDevice);
if (err != 0) {
cerr<<"cuda call error in random generator"<<endl;
}
free(res);
return d_res;
}
void ListUnionGPU::debug(bool verification=false) {
int i, j, k;
unsigned int * h_vlist;
cudaError_t err;
int sum = 0;
cerr << "list size : " << list_size << " list len : " << list_len
<< " vlist len : " << vlist_len << endl;
if (verification == false) {
h_vlist = (unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
cerr << "list_size = " << list_size << "," << "list_len = "
<< list_len << "," << "vlist_len = " << vlist_len << endl;
cerr << "lists" << endl;
for (i = 0; i < list_len; i++) {
cerr << "list item " << dec << i << endl;
err = cudaMemcpy(h_vlist,src_list_1[i], sizeof(unsigned int)*vlist_len,
cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos >> k) & 0x00000001) << ",";
}
// cerr<<hex<<"("<<cur_pos<<"),";
}
cerr << endl;
err = cudaMemcpy(h_vlist,src_list_2[i], sizeof(unsigned int)*vlist_len, cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos>>k) & 0x00000001)<<",";
}
}
cerr << endl;
err = cudaMemcpy(h_vlist, dst_list[i], sizeof(unsigned int)*vlist_len,
cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
sum = 0;
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
cerr << ((cur_pos >> k) & 0x00000001) << ",";
sum += ((cur_pos >> k) & 0x00000001);
}
}
cerr << endl;
cerr << "Results : " << result[i] << endl;
cerr << "Verification : " << sum << endl;
}
cerr << "result: " << endl;
for (i = 0; i < list_len; i++) {
cerr << dec << result[i] << ",";
}
cerr << endl;
free(h_vlist);
} else if (verification == true) {
h_vlist=(unsigned int *)malloc(sizeof(unsigned int)*vlist_len);
for (i = 0; i < list_len; i++) {
err = cudaMemcpy(h_vlist, src_list_1[i], sizeof(unsigned int)*vlist_len,
cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
err = cudaMemcpy(h_vlist, src_list_2[i], sizeof(unsigned int)*vlist_len,
cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
err = cudaMemcpy(h_vlist, dst_list[i], sizeof(unsigned int)*vlist_len,
cudaMemcpyDeviceToHost);
if (err != 0) {
cerr << "cuda call error in debug, code = " << err << endl;
}
sum = 0 ;
for (j = 0; j < vlist_len; j++) {
int cur_pos = h_vlist[j];
for (k = 31; k >= 0; k--) {
sum += ((cur_pos >> k) & 0x00000001);
}
}
if (result[i] != sum) {
cerr << "inconsistent result at " << i << endl;
}
}
free(h_vlist);
}
cerr<<"------------------------finish debugging lug-------------------------\n";
}
|
85fb331d4a4c5ec755d5b57954342a188e5bba4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
// The code below is mostly copied from Pytorch PersistentSoftmax.cuh
#include "core/providers/cuda/cu_inc/common.cuh"
#include "softmax_warpwise_impl.cuh"
#include "softmax_blockwise_impl.cuh"
#include "softmax.h"
#include <limits>
namespace onnxruntime {
namespace cuda {
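// Warp-wise path: each warp processes one row (or two rows when the row has at most 128
// elements) entirely in registers; the dispatcher below picks a specialization from the next
// power of two of the row length, covering rows of up to 1024 elements.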
template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_warpwise_softmax_forward(hipStream_t stream, output_t* dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count) {
if (softmax_elements == 0) {
return;
} else {
int log2_elements = log2_ceil(softmax_elements);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = (next_power_of_two < GPU_WARP_SIZE_HOST) ? next_power_of_two : GPU_WARP_SIZE_HOST;
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 0, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 1: // 2
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 1, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 2: // 4
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 2, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 3: // 8
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 3, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 4: // 16
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 4, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 5: // 32
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 5, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 6: // 64
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 6, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 7: // 128
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 7, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 8: // 256
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 8, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 9: // 512
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 9, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 10: // 1024
hipLaunchKernelGGL(( softmax_warp_forward<input_t, output_t, acc_t, 10, is_log_softmax>)
, dim3(blocks), dim3(threads), 0, stream, dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
default:
break;
}
}
}
#define SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \
template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, false>(hipStream_t stream, output_t * dst, \
const input_t* src, int softmax_elements, \
int softmax_elements_stride, int batch_count); \
template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, true>(hipStream_t stream, output_t * dst, \
const input_t* src, int softmax_elements, \
int softmax_elements_stride, int batch_count);
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(float, float, float)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(half, half, float)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(double, double, double)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float)
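// Block-wise path: one thread block per row, using a dynamic shared-memory buffer of one
// accumulator per thread for the reductions.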
template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_blockwise_softmax_forward(hipStream_t stream, output_t* output, const input_t* input, int softmax_elements,
int input_stride, int output_stride, int batch_count) {
dim3 grid(batch_count);
constexpr int ILP = sizeof(float4) / sizeof(input_t);
dim3 block = SoftMax_getBlockSize(ILP, softmax_elements);
if (is_log_softmax) {
hipLaunchKernelGGL(( softmax_block_forward<ILP, input_t, acc_t, output_t, LogSoftMaxForwardEpilogue>)
, dim3(grid), dim3(block), block.x * sizeof(acc_t), stream, output, const_cast<input_t*>(input),
softmax_elements, input_stride, output_stride);
} else {
hipLaunchKernelGGL(( softmax_block_forward<ILP, input_t, acc_t, output_t, SoftMaxForwardEpilogue>)
, dim3(grid), dim3(block), block.x * sizeof(acc_t), stream, output, const_cast<input_t*>(input),
softmax_elements, input_stride, output_stride);
}
}
#define SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \
template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, false>( \
hipStream_t stream, output_t * output, const input_t* src, int softmax_elements, \
int input_stride, int output_stride, int batch_count); \
template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, true>( \
hipStream_t stream, output_t * output, const input_t* src, int softmax_elements, \
int input_stride, int output_stride, int batch_count);
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(float, float, float)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, half, float)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(double, double, double)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float)
#ifndef DISABLE_CONTRIB_OPS
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, float, float) // used by BeamSearch op
#endif
} // namespace cuda
} // namespace onnxruntime
| 85fb331d4a4c5ec755d5b57954342a188e5bba4b.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Modifications Copyright (c) Microsoft. */
// The code below is mostly copied from Pytorch PersistentSoftmax.cuh
#include "core/providers/cuda/cu_inc/common.cuh"
#include "softmax_warpwise_impl.cuh"
#include "softmax_blockwise_impl.cuh"
#include "softmax.h"
#include <limits>
namespace onnxruntime {
namespace cuda {
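// Warp-wise path: each warp processes one row (or two rows when the row has at most 128
// elements) entirely in registers; the dispatcher below picks a specialization from the next
// power of two of the row length, covering rows of up to 1024 elements.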
template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_warpwise_softmax_forward(cudaStream_t stream, output_t* dst, const input_t* src, int softmax_elements, int softmax_elements_stride, int batch_count) {
if (softmax_elements == 0) {
return;
} else {
int log2_elements = log2_ceil(softmax_elements);
const int next_power_of_two = 1 << log2_elements;
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
int warp_size = (next_power_of_two < GPU_WARP_SIZE_HOST) ? next_power_of_two : GPU_WARP_SIZE_HOST;
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
// use 128 threads per block to maximize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / warp_size);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_count + batches_per_block - 1) / batches_per_block;
dim3 threads(warp_size, warps_per_block, 1);
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
switch (log2_elements) {
case 0: // 1
softmax_warp_forward<input_t, output_t, acc_t, 0, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 1: // 2
softmax_warp_forward<input_t, output_t, acc_t, 1, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 2: // 4
softmax_warp_forward<input_t, output_t, acc_t, 2, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 3: // 8
softmax_warp_forward<input_t, output_t, acc_t, 3, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 4: // 16
softmax_warp_forward<input_t, output_t, acc_t, 4, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 5: // 32
softmax_warp_forward<input_t, output_t, acc_t, 5, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 6: // 64
softmax_warp_forward<input_t, output_t, acc_t, 6, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 7: // 128
softmax_warp_forward<input_t, output_t, acc_t, 7, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 8: // 256
softmax_warp_forward<input_t, output_t, acc_t, 8, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 9: // 512
softmax_warp_forward<input_t, output_t, acc_t, 9, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
case 10: // 1024
softmax_warp_forward<input_t, output_t, acc_t, 10, is_log_softmax>
<<<blocks, threads, 0, stream>>>(dst, src, batch_count, softmax_elements_stride, softmax_elements);
break;
default:
break;
}
}
}
#define SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \
template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, false>(cudaStream_t stream, output_t * dst, \
const input_t* src, int softmax_elements, \
int softmax_elements_stride, int batch_count); \
template void dispatch_warpwise_softmax_forward<input_t, output_t, acc_t, true>(cudaStream_t stream, output_t * dst, \
const input_t* src, int softmax_elements, \
int softmax_elements_stride, int batch_count);
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(float, float, float)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(half, half, float)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(double, double, double)
SPECIALIZED_WRAPWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float)
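// Block-wise path: one thread block per row, using a dynamic shared-memory buffer of one
// accumulator per thread for the reductions.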
template <typename input_t, typename output_t, typename acc_t, bool is_log_softmax>
void dispatch_blockwise_softmax_forward(cudaStream_t stream, output_t* output, const input_t* input, int softmax_elements,
int input_stride, int output_stride, int batch_count) {
dim3 grid(batch_count);
constexpr int ILP = sizeof(float4) / sizeof(input_t);
dim3 block = SoftMax_getBlockSize(ILP, softmax_elements);
if (is_log_softmax) {
softmax_block_forward<ILP, input_t, acc_t, output_t, LogSoftMaxForwardEpilogue>
<<<grid, block, block.x * sizeof(acc_t), stream>>>(output, const_cast<input_t*>(input),
softmax_elements, input_stride, output_stride);
} else {
softmax_block_forward<ILP, input_t, acc_t, output_t, SoftMaxForwardEpilogue>
<<<grid, block, block.x * sizeof(acc_t), stream>>>(output, const_cast<input_t*>(input),
softmax_elements, input_stride, output_stride);
}
}
#define SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(input_t, output_t, acc_t) \
template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, false>( \
cudaStream_t stream, output_t * output, const input_t* src, int softmax_elements, \
int input_stride, int output_stride, int batch_count); \
template void dispatch_blockwise_softmax_forward<input_t, output_t, acc_t, true>( \
cudaStream_t stream, output_t * output, const input_t* src, int softmax_elements, \
int input_stride, int output_stride, int batch_count);
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(float, float, float)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, half, float)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(double, double, double)
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(BFloat16, BFloat16, float)
#ifndef DISABLE_CONTRIB_OPS
SPECIALIZED_BLOCKWISE_SOFTMAX_IMPL(half, float, float) // used by BeamSearch op
#endif
} // namespace cuda
} // namespace onnxruntime
|
72fb6a7efa64f181131b31ceb313eef34ba7069d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_spline_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _reg_spline_GPU_CU
#define _reg_spline_GPU_CU
#include "_reg_localTransformation_gpu.h"
#include "_reg_localTransformation_kernels.cu"
/* *************************************************************** */
/* *************************************************************** */
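// Fills the position/deformation field over the active (masked) voxels of the reference
// image by interpolating the control point grid (the bspline flag selects the basis used).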
void reg_spline_getDeformationField_gpu(nifti_image *controlPointImage,
nifti_image *reference,
float4 **controlPointImageArray_d,
float4 **positionFieldImageArray_d,
int **mask_d,
int activeVoxelNumber,
bool bspline)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int voxelNumber = reference->nx * reference->ny * reference->nz;
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 referenceImageDim = make_int3(reference->nx, reference->ny, reference->nz);
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int useBSpline = static_cast<int>(bspline);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / reference->dx,
controlPointImage->dy / reference->dy,
controlPointImage->dz / reference->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_UseBSpline,&useBSpline,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipBindTexture(0, controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4)))
NR_CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)))
if(reference->nz>1){
const unsigned int Grid_reg_spline_getDeformationField3D =
(unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField3D)));
dim3 G1(Grid_reg_spline_getDeformationField3D,Grid_reg_spline_getDeformationField3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField3D,1,1);
// 8 floats of shared memory are allocated per thread
hipLaunchKernelGGL(( reg_spline_getDeformationField3D)
, dim3(G1), dim3(B1), NR_BLOCK->Block_reg_spline_getDeformationField3D*8*sizeof(float) , 0,
*positionFieldImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getDeformationField2D =
(unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField2D)));
dim3 G1(Grid_reg_spline_getDeformationField2D,Grid_reg_spline_getDeformationField2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField2D,1,1);
// 4 floats of shared memory are allocated per thread
hipLaunchKernelGGL(( reg_spline_getDeformationField2D)
, dim3(G1), dim3(B1), NR_BLOCK->Block_reg_spline_getDeformationField2D*4*sizeof(float) , 0,
*positionFieldImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture))
NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture))
return;
}
/* *************************************************************** */
/* *************************************************************** */
float reg_spline_approxBendingEnergy_gpu(nifti_image *controlPointImage,
float4 **controlPointImageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// First compute all the second derivatives
float4 *secondDerivativeValues_d;
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 6*controlPointGridMem))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxSecondDerivatives3D) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 3*controlPointGridMem))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxSecondDerivatives2D) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture))
// Compute the bending energy from the second derivatives
float *penaltyTerm_d;
NR_CUDA_SAFE_CALL(hipMalloc(&penaltyTerm_d, controlPointNumber*sizeof(float)))
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
6*controlPointGridMem))
const unsigned int Grid_reg_spline_ApproxBendingEnergy =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D)));
dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxBendingEnergy3D_kernel) , dim3(G2), dim3(B2) , 0, 0, penaltyTerm_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
else{
NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
3*controlPointGridMem))
const unsigned int Grid_reg_spline_ApproxBendingEnergy =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D)));
dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxBendingEnergy2D_kernel) , dim3(G2), dim3(B2) , 0, 0, penaltyTerm_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(secondDerivativesTexture))
NR_CUDA_SAFE_CALL(hipFree(secondDerivativeValues_d))
// Compute the mean bending energy value
double penaltyValue=reg_sumReduction_gpu(penaltyTerm_d,controlPointNumber);
NR_CUDA_SAFE_CALL(hipFree(penaltyTerm_d))
return (float)(penaltyValue/(double)controlPointImage->nvox);
}
/* *************************************************************** */
/* *************************************************************** */
void reg_spline_approxBendingEnergyGradient_gpu(nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float4 **nodeGradientArray_d,
float bendingEnergyWeight)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// First compute all the second derivatives
float4 *secondDerivativeValues_d;
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4)))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxSecondDerivatives3D) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 3*controlPointNumber*sizeof(float4)))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxSecondDerivatives2D) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture))
// Compute the gradient
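// The weight is normalised by the number of control points so that the gradient stays on the same
// scale as the averaged bending-energy value computed above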
bendingEnergyWeight *= 1.f / (float)controlPointNumber;
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Weight,&bendingEnergyWeight,sizeof(float)))
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
6*controlPointNumber*sizeof(float4)))
const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D)));
dim3 G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxBendingEnergyGradient3D_kernel) , dim3(G2), dim3(B2) , 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
else{
NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
3*controlPointNumber*sizeof(float4)))
const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D)));
dim3 G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxBendingEnergyGradient2D_kernel) , dim3(G2), dim3(B2) , 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(secondDerivativesTexture))
NR_CUDA_SAFE_CALL(hipFree(secondDerivativeValues_d))
return;
}
/* *************************************************************** */
/* *************************************************************** */
void reg_spline_ComputeApproxJacobianValues(nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float **jacobianMatrices_d,
float **jacobianDet_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Need to reorient the Jacobian matrix using the header information - real to voxel conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// The Jacobian matrix is computed for every control point
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_getApproxJacobianValues3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D)));
dim3 G1(Grid_reg_spline_getApproxJacobianValues3D,Grid_reg_spline_getApproxJacobianValues3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxJacobianValues3D_kernel), dim3(G1), dim3(B1), 0, 0, *jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getApproxJacobianValues2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D)));
dim3 G1(Grid_reg_spline_getApproxJacobianValues2D,Grid_reg_spline_getApproxJacobianValues2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D,1,1);
hipLaunchKernelGGL(( reg_spline_getApproxJacobianValues2D_kernel), dim3(G1), dim3(B1), 0, 0, *jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture))
}
/* *************************************************************** */
void reg_spline_ComputeJacobianValues(nifti_image *controlPointImage,
nifti_image *referenceImage,
float4 **controlPointImageArray_d,
float **jacobianMatrices_d,
float **jacobianDet_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Need to reorient the Jacobian matrix using the header information - real to voxel conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
// Bind some variables
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4)))
// The Jacobian matrix is computed for every voxel
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_getJacobianValues3D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues3D)));
dim3 G1(Grid_reg_spline_getJacobianValues3D,Grid_reg_spline_getJacobianValues3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getJacobianValues3D,1,1);
// 8 floats of shared memory are allocated per thread
hipLaunchKernelGGL(( reg_spline_getJacobianValues3D_kernel)
, dim3(G1), dim3(B1), NR_BLOCK->Block_reg_spline_getJacobianValues3D*8*sizeof(float), 0,
*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getJacobianValues2D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues2D)));
dim3 G1(Grid_reg_spline_getJacobianValues2D,Grid_reg_spline_getJacobianValues2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getJacobianValues2D,1,1);
hipLaunchKernelGGL(( reg_spline_getJacobianValues2D_kernel)
, dim3(G1), dim3(B1), 0, 0,
*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture))
}
/* *************************************************************** */
/* *************************************************************** */
double reg_spline_getJacobianPenaltyTerm_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
bool approx
)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
double jacSum;
if(approx){
jacNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
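// jacSum counts only the interior control points (the -2 on each dimension removes the grid border);
// it is used below as the normalisation factor of the penalty average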
jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2);
if(controlPointImage->nz>1){
jacSum *= controlPointImage->nz-2;
// Allocate array for 3x3 matrices
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
}
else{
// Allocate array for 2x2 matrices
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
}
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
jacSum=jacNumber;
if(controlPointImage->nz>1){
// Allocate array for 3x3 matrices
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
}
else{
// Allocate array for 2x2 matrices
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
}
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d))
// The Jacobian determinants are replaced in place by their log-squared values
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
const unsigned int Grid_reg_spline_logSquaredValues =
(unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues)));
dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1);
dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1);
hipLaunchKernelGGL(( reg_spline_logSquaredValues_kernel), dim3(G1), dim3(B1), 0, 0, jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
// Perform the reduction
double penaltyTermValue = reg_sumReduction_gpu(jacobianDet_d,jacNumber);
NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d))
return penaltyTermValue/jacSum;
}
/* *************************************************************** */
void reg_spline_getJacobianPenaltyTermGradient_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float4 **nodeGradientArray_d,
float jacobianWeight,
bool approx)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
if(approx){
jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
// Need to desorient the Jacobian matrix using the header information - voxel to real conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianDeterminantTexture, jacobianDet_d,
jacNumber*sizeof(float)))
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
4*jacNumber*sizeof(float)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
float3 weight=make_float3(
referenceImage->dx*jacobianWeight / ((float)jacNumber*controlPointImage->dx),
referenceImage->dy*jacobianWeight / ((float)jacNumber*controlPointImage->dy),
referenceImage->dz*jacobianWeight / ((float)jacNumber*controlPointImage->dz));
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Weight3,&weight,sizeof(float3)))
if(approx){
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_computeApproxJacGradient3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D)));
dim3 G1(Grid_reg_spline_computeApproxJacGradient3D,Grid_reg_spline_computeApproxJacGradient3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D,1,1);
hipLaunchKernelGGL(( reg_spline_computeApproxJacGradient3D_kernel), dim3(G1), dim3(B1), 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_computeApproxJacGradient2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D)));
dim3 G1(Grid_reg_spline_computeApproxJacGradient2D,Grid_reg_spline_computeApproxJacGradient2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D,1,1);
hipLaunchKernelGGL(( reg_spline_computeApproxJacGradient2D_kernel), dim3(G1), dim3(B1), 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
}
else{
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_computeJacGradient3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient3D)));
dim3 G1(Grid_reg_spline_computeJacGradient3D,Grid_reg_spline_computeJacGradient3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient3D,1,1);
hipLaunchKernelGGL(( reg_spline_computeJacGradient3D_kernel), dim3(G1), dim3(B1), 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_computeJacGradient2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient2D)));
dim3 G1(Grid_reg_spline_computeJacGradient2D,Grid_reg_spline_computeJacGradient2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient2D,1,1);
hipLaunchKernelGGL(( reg_spline_computeJacGradient2D_kernel), dim3(G1), dim3(B1), 0, 0, *nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianDeterminantTexture))
NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianMatricesTexture))
NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d))
}
/* *************************************************************** */
double reg_spline_correctFolding_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
bool approx)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
double jacSum;
if(approx){
jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2);
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacSum=jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
// Compute the average log-squared Jacobian determinant; if it is finite, no folding was detected
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
float *jacobianDet2_d;
NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet2_d,jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMemcpy(jacobianDet2_d,jacobianDet_d,jacNumber*sizeof(float),hipMemcpyDeviceToDevice))
const unsigned int Grid_reg_spline_logSquaredValues =
(unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues)));
dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1);
dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1);
hipLaunchKernelGGL(( reg_spline_logSquaredValues_kernel), dim3(G1), dim3(B1), 0, 0, jacobianDet2_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
float *jacobianDet_h;
NR_CUDA_SAFE_CALL(hipHostMalloc(&jacobianDet_h,jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipMemcpy(jacobianDet_h,jacobianDet2_d,
jacNumber*sizeof(float),
hipMemcpyDeviceToHost))
NR_CUDA_SAFE_CALL(hipFree(jacobianDet2_d))
double penaltyTermValue=0.;
for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i];
NR_CUDA_SAFE_CALL(hipHostFree(jacobianDet_h))
penaltyTermValue /= jacSum;
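// A floating-point value only differs from itself when it is NaN: a finite average means that no
// folding was detected and the penalty value can be returned without any correction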
if(penaltyTermValue==penaltyTermValue){
NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d))
return penaltyTermValue;
}
// Need to desorient the Jacobian matrix using the header information - voxel to real conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianDeterminantTexture, jacobianDet_d,
jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
9*jacNumber*sizeof(float)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
if(approx){
const unsigned int Grid_reg_spline_approxCorrectFolding =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D)));
dim3 G1(Grid_reg_spline_approxCorrectFolding,Grid_reg_spline_approxCorrectFolding,1);
dim3 B1(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D,1,1);
hipLaunchKernelGGL(( reg_spline_approxCorrectFolding3D_kernel), dim3(G1), dim3(B1), 0, 0, *controlPointImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
const unsigned int Grid_reg_spline_correctFolding =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_correctFolding3D)));
dim3 G1(Grid_reg_spline_correctFolding,Grid_reg_spline_correctFolding,1);
dim3 B1(NR_BLOCK->Block_reg_spline_correctFolding3D,1,1);
hipLaunchKernelGGL(( reg_spline_correctFolding3D_kernel), dim3(G1), dim3(B1), 0, 0, *controlPointImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianDeterminantTexture))
NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianMatricesTexture))
NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d))
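// NaN is returned deliberately: it signals that folded regions were found and that the control point
// grid has just been modified by the folding-correction kernel above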
return std::numeric_limits<double>::quiet_NaN();
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFromDisplacement_gpu( nifti_image *image, float4 **imageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Bind the qform or sform
mat44 temp_mat=image->qto_xyz;
if(image->sform_code>0) temp_mat=image->sto_xyz;
float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
const int voxelNumber=image->nx*image->ny*image->nz;
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))
const unsigned int Grid_reg_getDeformationFromDisplacement =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDeformationFromDisplacement)));
dim3 G1(Grid_reg_getDeformationFromDisplacement,Grid_reg_getDeformationFromDisplacement,1);
dim3 B1(NR_BLOCK->Block_reg_getDeformationFromDisplacement,1,1);
hipLaunchKernelGGL(( reg_getDeformationFromDisplacement3D_kernel), dim3(G1), dim3(B1), 0, 0, *imageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDisplacementFromDeformation_gpu( nifti_image *image, float4 **imageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Bind the qform or sform
mat44 temp_mat=image->qto_xyz;
if(image->sform_code>0) temp_mat=image->sto_xyz;
float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
const int voxelNumber=image->nx*image->ny*image->nz;
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))
const unsigned int Grid_reg_getDisplacementFromDeformation =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDisplacementFromDeformation)));
dim3 G1(Grid_reg_getDisplacementFromDeformation,Grid_reg_getDisplacementFromDeformation,1);
dim3 B1(NR_BLOCK->Block_reg_getDisplacementFromDeformation,1,1);
hipLaunchKernelGGL(( reg_getDisplacementFromDeformation3D_kernel), dim3(G1), dim3(B1), 0, 0, *imageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h,
nifti_image *def_h,
float4 **cpp_gpu,
float4 **def_gpu)
{
const int voxelNumber = def_h->nx * def_h->ny * def_h->nz;
// Create a mask array where no voxel is excluded
int *mask_gpu=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&mask_gpu, voxelNumber*sizeof(int)))
reg_fillMaskArray_gpu(voxelNumber,&mask_gpu);
// Define some variables for the deformation fields
float4 *tempDef_gpu=NULL;
NR_CUDA_SAFE_CALL(hipMalloc(&tempDef_gpu,voxelNumber*sizeof(float4)))
// The deformation field is computed
reg_spline_getDeformationField_gpu(cpp_h,
def_h,
cpp_gpu,
def_gpu,
&mask_gpu,
voxelNumber,
true); // non-interpolant splines are used
// The deformation field is converted into a displacement field
reg_getDisplacementFromDeformation_gpu(def_h,def_gpu);
// Scaling of the deformation field
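// cpp_h->intent_p1 encodes the velocity-field integration: its magnitude is the number of
// scaling-and-squaring steps and its sign the direction (negative values denote the backward field)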
float scalingValue = pow(2.0f,fabs(cpp_h->intent_p1));
if(cpp_h->intent_p1<0)
// backward deformation field is scaled down
reg_multiplyValue_gpu(voxelNumber,
def_gpu,
-1.f/scalingValue);
else
// forward deformation field is scaled down
reg_multiplyValue_gpu(voxelNumber,
def_gpu,
1.f/scalingValue);
// The displacement field is converted back into a deformation field
reg_getDeformationFromDisplacement_gpu(def_h,def_gpu);
// The deformation field is squared
unsigned int squaringNumber = (unsigned int)fabs(cpp_h->intent_p1);
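// Scaling-and-squaring: each composition of the field with itself doubles the encoded deformation,
// so after squaringNumber compositions the initial 1/2^N scaling is undone. For example,
// intent_p1 = 6 means the field is first scaled by 1/64 and then composed with itself 6 times.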
for(unsigned int i=0;i<squaringNumber;++i){
// The deformation field arrays are updated
NR_CUDA_SAFE_CALL(hipMemcpy(tempDef_gpu,*def_gpu,voxelNumber*sizeof(float4),hipMemcpyDeviceToDevice))
// The deformation fields are composed
reg_defField_compose_gpu(def_h,
&tempDef_gpu,
def_gpu,
&mask_gpu,
voxelNumber);
}
NR_CUDA_SAFE_CALL(hipFree(tempDef_gpu))
NR_CUDA_SAFE_CALL(hipFree(mask_gpu))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_compose_gpu(nifti_image *def,
float4 **def_gpu,
float4 **defOut_gpu,
int **mask_gpu,
int activeVoxel)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int voxelNumber=def->nx*def->ny*def->nz;
// Bind the qform or sform
mat44 temp_mat=def->qto_ijk;
if(def->sform_code>0) temp_mat=def->sto_ijk;
float4 temp;
temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
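// Both directions of the voxel/world transform are uploaded: the real-to-voxel (ijk) matrices above
// and the voxel-to-real (xyz) matrices below, since the composition kernel has to convert between
// world coordinates and grid indices of the input deformation field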
temp_mat=def->qto_xyz;
if(def->sform_code>0) temp_mat=def->sto_xyz;
temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0c,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1c,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2c,&temp,sizeof(float4)))
const int3 referenceImageDim=make_int3(def->nx,def->ny,def->nz);
NR_CUDA_SAFE_CALL(hipBindTexture(0,voxelDeformationTexture,*def_gpu,activeVoxel*sizeof(float4)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,maskTexture,*mask_gpu,activeVoxel*sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
if(def->nz>1){
const unsigned int Grid_reg_defField_compose3D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose3D)));
dim3 G1(Grid_reg_defField_compose3D,Grid_reg_defField_compose3D,1);
dim3 B1(NR_BLOCK->Block_reg_defField_compose3D,1,1);
hipLaunchKernelGGL(( reg_defField_compose3D_kernel), dim3(G1), dim3(B1), 0, 0, *defOut_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_defField_compose2D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose2D)));
dim3 G1(Grid_reg_defField_compose2D,Grid_reg_defField_compose2D,1);
dim3 B1(NR_BLOCK->Block_reg_defField_compose2D,1,1);
hipLaunchKernelGGL(( reg_defField_compose2D_kernel), dim3(G1), dim3(B1), 0, 0, *defOut_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(hipUnbindTexture(voxelDeformationTexture))
NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_getJacobianMatrix_gpu(nifti_image *deformationField,
float4 **deformationField_gpu,
float **jacobianMatrices_gpu)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int3 referenceDim=make_int3(deformationField->nx,deformationField->ny,deformationField->nz);
const float3 referenceSpacing=make_float3(deformationField->dx,deformationField->dy,deformationField->dz);
const int voxelNumber = referenceDim.x*referenceDim.y*referenceDim.z;
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceSpacing,&referenceSpacing,sizeof(float3)))
mat33 reorientation;
if(deformationField->sform_code>0)
reorientation=reg_mat44_to_mat33(&deformationField->sto_xyz);
else reorientation=reg_mat44_to_mat33(&deformationField->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(hipBindTexture(0,voxelDeformationTexture,*deformationField_gpu,voxelNumber*sizeof(float4)))
const unsigned int Grid_reg_defField_getJacobianMatrix =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_getJacobianMatrix)));
dim3 G1(Grid_reg_defField_getJacobianMatrix,Grid_reg_defField_getJacobianMatrix,1);
dim3 B1(NR_BLOCK->Block_reg_defField_getJacobianMatrix);
hipLaunchKernelGGL(( reg_defField_getJacobianMatrix3D_kernel), dim3(G1),dim3(B1), 0, 0, *jacobianMatrices_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(hipUnbindTexture(voxelDeformationTexture))
}
/* *************************************************************** */
/* *************************************************************** */
#endif
| 72fb6a7efa64f181131b31ceb313eef34ba7069d.cu | /*
* _reg_spline_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009-2018, University College London
* Copyright (c) 2018, NiftyReg Developers.
* All rights reserved.
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _reg_spline_GPU_CU
#define _reg_spline_GPU_CU
#include "_reg_localTransformation_gpu.h"
#include "_reg_localTransformation_kernels.cu"
/* *************************************************************** */
/* *************************************************************** */
void reg_spline_getDeformationField_gpu(nifti_image *controlPointImage,
nifti_image *reference,
float4 **controlPointImageArray_d,
float4 **positionFieldImageArray_d,
int **mask_d,
int activeVoxelNumber,
bool bspline)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int voxelNumber = reference->nx * reference->ny * reference->nz;
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 referenceImageDim = make_int3(reference->nx, reference->ny, reference->nz);
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int useBSpline = static_cast<int>(bspline);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / reference->dx,
controlPointImage->dy / reference->dy,
controlPointImage->dz / reference->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_UseBSpline,&useBSpline,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0, controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)))
if(reference->nz>1){
const unsigned int Grid_reg_spline_getDeformationField3D =
(unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField3D)));
dim3 G1(Grid_reg_spline_getDeformationField3D,Grid_reg_spline_getDeformationField3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField3D,1,1);
// 8 floats of shared memory are allocated per thread
reg_spline_getDeformationField3D
<<< G1, B1, NR_BLOCK->Block_reg_spline_getDeformationField3D*8*sizeof(float) >>>
(*positionFieldImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getDeformationField2D =
(unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(NR_BLOCK->Block_reg_spline_getDeformationField2D)));
dim3 G1(Grid_reg_spline_getDeformationField2D,Grid_reg_spline_getDeformationField2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getDeformationField2D,1,1);
// 4 floats of shared memory are allocated per thread
reg_spline_getDeformationField2D
<<< G1, B1, NR_BLOCK->Block_reg_spline_getDeformationField2D*4*sizeof(float) >>>
(*positionFieldImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture))
return;
}
/* *************************************************************** */
/* *************************************************************** */
float reg_spline_approxBendingEnergy_gpu(nifti_image *controlPointImage,
float4 **controlPointImageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// First compute all the second derivatives
float4 *secondDerivativeValues_d;
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointGridMem))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1);
reg_spline_getApproxSecondDerivatives3D <<< G1, B1 >>>(secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 3*controlPointGridMem))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1);
reg_spline_getApproxSecondDerivatives2D <<< G1, B1 >>>(secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
// Compute the bending energy from the second derivatives
float *penaltyTerm_d;
NR_CUDA_SAFE_CALL(cudaMalloc(&penaltyTerm_d, controlPointNumber*sizeof(float)))
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
6*controlPointGridMem))
const unsigned int Grid_reg_spline_ApproxBendingEnergy =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D)));
dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy3D,1,1);
reg_spline_getApproxBendingEnergy3D_kernel <<< G2, B2 >>>(penaltyTerm_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
else{
NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
3*controlPointGridMem))
const unsigned int Grid_reg_spline_ApproxBendingEnergy =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D)));
dim3 G2(Grid_reg_spline_ApproxBendingEnergy,Grid_reg_spline_ApproxBendingEnergy,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergy2D,1,1);
reg_spline_getApproxBendingEnergy2D_kernel <<< G2, B2 >>>(penaltyTerm_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture))
NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d))
// Compute the mean bending energy value
double penaltyValue=reg_sumReduction_gpu(penaltyTerm_d,controlPointNumber);
NR_CUDA_SAFE_CALL(cudaFree(penaltyTerm_d))
return (float)(penaltyValue/(double)controlPointImage->nvox);
}
/* *************************************************************** */
/* *************************************************************** */
void reg_spline_approxBendingEnergyGradient_gpu(nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float4 **nodeGradientArray_d,
float bendingEnergyWeight)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// First compute all the second derivatives
float4 *secondDerivativeValues_d;
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4)))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives3D,1,1);
reg_spline_getApproxSecondDerivatives3D <<< G1, B1 >>>(secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 3*controlPointNumber*sizeof(float4)))
const unsigned int Grid_bspline_getApproxSecondDerivatives =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D)));
dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxSecondDerivatives2D,1,1);
reg_spline_getApproxSecondDerivatives2D <<< G1, B1 >>>(secondDerivativeValues_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
// Compute the gradient
bendingEnergyWeight *= 1.f / (float)controlPointNumber;
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&bendingEnergyWeight,sizeof(float)))
if(controlPointImage->nz>1){
NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
6*controlPointNumber*sizeof(float4)))
const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D)));
dim3 G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient3D,1,1);
reg_spline_getApproxBendingEnergyGradient3D_kernel <<< G2, B2 >>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
else{
NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture,
secondDerivativeValues_d,
3*controlPointNumber*sizeof(float4)))
const unsigned int Grid_reg_spline_getApproxBendingEnergyGradient =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D)));
dim3 G2(Grid_reg_spline_getApproxBendingEnergyGradient,Grid_reg_spline_getApproxBendingEnergyGradient,1);
dim3 B2(NR_BLOCK->Block_reg_spline_getApproxBendingEnergyGradient2D,1,1);
reg_spline_getApproxBendingEnergyGradient2D_kernel <<< G2, B2 >>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G2,B2)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture))
NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d))
return;
}
/* *************************************************************** */
/* *************************************************************** */
void reg_spline_ComputeApproxJacobianValues(nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float **jacobianMatrices_d,
float **jacobianDet_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Need to reorient the Jacobian matrix using the header information - real to voxel conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
const int controlPointGridMem = controlPointNumber*sizeof(float4);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem))
// The Jacobian matrix is computed for every control point
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_getApproxJacobianValues3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D)));
dim3 G1(Grid_reg_spline_getApproxJacobianValues3D,Grid_reg_spline_getApproxJacobianValues3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues3D,1,1);
reg_spline_getApproxJacobianValues3D_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getApproxJacobianValues2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D)));
dim3 G1(Grid_reg_spline_getApproxJacobianValues2D,Grid_reg_spline_getApproxJacobianValues2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getApproxJacobianValues2D,1,1);
reg_spline_getApproxJacobianValues2D_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
}
/* *************************************************************** */
void reg_spline_ComputeJacobianValues(nifti_image *controlPointImage,
nifti_image *referenceImage,
float4 **controlPointImageArray_d,
float **jacobianMatrices_d,
float **jacobianDet_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Need to reorient the Jacobian matrix using the header information - real to voxel conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_xyz);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
// Bind some variables
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4)))
// The Jacobian matrix is computed for every voxel
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_getJacobianValues3D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues3D)));
dim3 G1(Grid_reg_spline_getJacobianValues3D,Grid_reg_spline_getJacobianValues3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getJacobianValues3D,1,1);
// 8 floats of shared memory are allocated per thread
reg_spline_getJacobianValues3D_kernel
<<< G1, B1, NR_BLOCK->Block_reg_spline_getJacobianValues3D*8*sizeof(float)>>>
(*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_getJacobianValues2D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_spline_getJacobianValues2D)));
dim3 G1(Grid_reg_spline_getJacobianValues2D,Grid_reg_spline_getJacobianValues2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_getJacobianValues2D,1,1);
reg_spline_getJacobianValues2D_kernel
<<< G1, B1>>>
(*jacobianMatrices_d, *jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture))
}
/* *************************************************************** */
/* *************************************************************** */
double reg_spline_getJacobianPenaltyTerm_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
bool approx
)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
double jacSum;
if(approx){
jacNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2);
if(controlPointImage->nz>1){
jacSum *= controlPointImage->nz-2;
// Allocate array for 3x3 matrices
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
}
else{
// Allocate array for 2x2 matrices
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
}
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
jacSum=jacNumber;
if(controlPointImage->nz>1){
// Allocate array for 3x3 matrices
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
}
else{
// Allocate array for 2x2 matrices
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
}
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
// The Jacobian determinants are squared and logged
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
const unsigned int Grid_reg_spline_logSquaredValues =
(unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues)));
dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1);
dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1);
reg_spline_logSquaredValues_kernel<<< G1, B1>>>(jacobianDet_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
// Perform the reduction
double penaltyTermValue = reg_sumReduction_gpu(jacobianDet_d,jacNumber);
NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
return penaltyTermValue/jacSum;
}
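/* Editor's note (not part of the original NiftyReg source): assuming the
   logSquaredValues kernel stores the squared log of each determinant, the
   value returned above is
       P = (1 / |S|) * sum_{x in S} [ log det J(x) ]^2
   where S is the set of inner control points when approx==true, or the set
   of reference-image voxels otherwise (|S| == jacSum). */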
/* *************************************************************** */
void reg_spline_getJacobianPenaltyTermGradient_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
float4 **nodeGradientArray_d,
float jacobianWeight,
bool approx)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
if(approx){
jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,4*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
// Need to desorient the Jacobian matrix using the header information - voxel to real conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d,
jacNumber*sizeof(float)))
if(controlPointImage->nz>1)
NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
9*jacNumber*sizeof(float)))
else NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
4*jacNumber*sizeof(float)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
float3 weight=make_float3(
referenceImage->dx*jacobianWeight / ((float)jacNumber*controlPointImage->dx),
referenceImage->dy*jacobianWeight / ((float)jacNumber*controlPointImage->dy),
referenceImage->dz*jacobianWeight / ((float)jacNumber*controlPointImage->dz));
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight3,&weight,sizeof(float3)))
if(approx){
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_computeApproxJacGradient3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D)));
dim3 G1(Grid_reg_spline_computeApproxJacGradient3D,Grid_reg_spline_computeApproxJacGradient3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient3D,1,1);
reg_spline_computeApproxJacGradient3D_kernel<<< G1, B1>>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_computeApproxJacGradient2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D)));
dim3 G1(Grid_reg_spline_computeApproxJacGradient2D,Grid_reg_spline_computeApproxJacGradient2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeApproxJacGradient2D,1,1);
reg_spline_computeApproxJacGradient2D_kernel<<< G1, B1>>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
}
else{
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
if(controlPointImage->nz>1){
const unsigned int Grid_reg_spline_computeJacGradient3D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient3D)));
dim3 G1(Grid_reg_spline_computeJacGradient3D,Grid_reg_spline_computeJacGradient3D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient3D,1,1);
reg_spline_computeJacGradient3D_kernel<<< G1, B1>>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_spline_computeJacGradient2D =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_computeJacGradient2D)));
dim3 G1(Grid_reg_spline_computeJacGradient2D,Grid_reg_spline_computeJacGradient2D,1);
dim3 B1(NR_BLOCK->Block_reg_spline_computeJacGradient2D,1,1);
reg_spline_computeJacGradient2D_kernel<<< G1, B1>>>(*nodeGradientArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture))
NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture))
NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
}
/* *************************************************************** */
double reg_spline_correctFolding_gpu(nifti_image *referenceImage,
nifti_image *controlPointImage,
float4 **controlPointImageArray_d,
bool approx)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// The Jacobian matrices and determinants are computed
float *jacobianMatrices_d;
float *jacobianDet_d;
int jacNumber;
double jacSum;
if(approx){
jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2);
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeApproxJacobianValues(controlPointImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
else{
jacSum=jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
reg_spline_ComputeJacobianValues(controlPointImage,
referenceImage,
controlPointImageArray_d,
&jacobianMatrices_d,
&jacobianDet_d);
}
// Compute the average penalty value; if it is not NaN (i.e. no folding is present), it is returned without any correction
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
float *jacobianDet2_d;
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet2_d,jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet2_d,jacobianDet_d,jacNumber*sizeof(float),cudaMemcpyDeviceToDevice))
const unsigned int Grid_reg_spline_logSquaredValues =
(unsigned int)ceilf(sqrtf((float)jacNumber/(float)(NR_BLOCK->Block_reg_spline_logSquaredValues)));
dim3 G1(Grid_reg_spline_logSquaredValues,Grid_reg_spline_logSquaredValues,1);
dim3 B1(NR_BLOCK->Block_reg_spline_logSquaredValues,1,1);
reg_spline_logSquaredValues_kernel<<< G1, B1>>>(jacobianDet2_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
float *jacobianDet_h;
NR_CUDA_SAFE_CALL(cudaMallocHost(&jacobianDet_h,jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet_h,jacobianDet2_d,
jacNumber*sizeof(float),
cudaMemcpyDeviceToHost))
NR_CUDA_SAFE_CALL(cudaFree(jacobianDet2_d))
double penaltyTermValue=0.;
for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i];
NR_CUDA_SAFE_CALL(cudaFreeHost(jacobianDet_h))
penaltyTermValue /= jacSum;
if(penaltyTermValue==penaltyTermValue){
NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
return penaltyTermValue;
}
// Need to desorient the Jacobian matrix using the header information - voxel to real conversion
mat33 reorientation;
if(controlPointImage->sform_code>0)
reorientation=reg_mat44_to_mat33(&controlPointImage->sto_ijk);
else reorientation=reg_mat44_to_mat33(&controlPointImage->qto_ijk);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d,
jacNumber*sizeof(float)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d,
9*jacNumber*sizeof(float)))
// Bind some variables
const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))
if(approx){
const unsigned int Grid_reg_spline_approxCorrectFolding =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D)));
dim3 G1(Grid_reg_spline_approxCorrectFolding,Grid_reg_spline_approxCorrectFolding,1);
dim3 B1(NR_BLOCK->Block_reg_spline_approxCorrectFolding3D,1,1);
reg_spline_approxCorrectFolding3D_kernel<<< G1, B1>>>(*controlPointImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
const float3 controlPointVoxelSpacing = make_float3(
controlPointImage->dx / referenceImage->dx,
controlPointImage->dy / referenceImage->dy,
controlPointImage->dz / referenceImage->dz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
const unsigned int Grid_reg_spline_correctFolding =
(unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(NR_BLOCK->Block_reg_spline_correctFolding3D)));
dim3 G1(Grid_reg_spline_correctFolding,Grid_reg_spline_correctFolding,1);
dim3 B1(NR_BLOCK->Block_reg_spline_correctFolding3D,1,1);
reg_spline_correctFolding3D_kernel<<< G1, B1>>>(*controlPointImageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture))
NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture))
NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
return std::numeric_limits<double>::quiet_NaN();
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFromDisplacement_gpu( nifti_image *image, float4 **imageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Bind the qform or sform
mat44 temp_mat=image->qto_xyz;
if(image->sform_code>0) temp_mat=image->sto_xyz;
float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
const int voxelNumber=image->nx*image->ny*image->nz;
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))
const unsigned int Grid_reg_getDeformationFromDisplacement =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDeformationFromDisplacement)));
dim3 G1(Grid_reg_getDeformationFromDisplacement,Grid_reg_getDeformationFromDisplacement,1);
dim3 B1(NR_BLOCK->Block_reg_getDeformationFromDisplacement,1,1);
reg_getDeformationFromDisplacement3D_kernel<<< G1, B1>>>(*imageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDisplacementFromDeformation_gpu( nifti_image *image, float4 **imageArray_d)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
// Bind the qform or sform
mat44 temp_mat=image->qto_xyz;
if(image->sform_code>0) temp_mat=image->sto_xyz;
float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
const int voxelNumber=image->nx*image->ny*image->nz;
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))
const unsigned int Grid_reg_getDisplacementFromDeformation =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_getDisplacementFromDeformation)));
dim3 G1(Grid_reg_getDisplacementFromDeformation,Grid_reg_getDisplacementFromDeformation,1);
dim3 B1(NR_BLOCK->Block_reg_getDisplacementFromDeformation,1,1);
reg_getDisplacementFromDeformation3D_kernel<<< G1, B1>>>(*imageArray_d);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h,
nifti_image *def_h,
float4 **cpp_gpu,
float4 **def_gpu)
{
const int voxelNumber = def_h->nx * def_h->ny * def_h->nz;
// Create a mask array where no voxel is excluded
int *mask_gpu=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&mask_gpu, voxelNumber*sizeof(int)))
reg_fillMaskArray_gpu(voxelNumber,&mask_gpu);
// Define some variables for the deformation fields
float4 *tempDef_gpu=NULL;
NR_CUDA_SAFE_CALL(cudaMalloc(&tempDef_gpu,voxelNumber*sizeof(float4)))
// The deformation field is computed
reg_spline_getDeformationField_gpu(cpp_h,
def_h,
cpp_gpu,
def_gpu,
&mask_gpu,
voxelNumber,
true); // non-interpolant splines are used
// The deformation field is converted into a displacement field
reg_getDisplacementFromDeformation_gpu(def_h,def_gpu);
// Scaling of the deformation field
float scalingValue = pow(2.0f,fabs(cpp_h->intent_p1));
if(cpp_h->intent_p1<0)
// backward deformation field is scaled down
reg_multiplyValue_gpu(voxelNumber,
def_gpu,
-1.f/scalingValue);
else
// forward deformation field is scaled down
reg_multiplyValue_gpu(voxelNumber,
def_gpu,
1.f/scalingValue);
// The displacement field is converted back into a deformation field
reg_getDeformationFromDisplacement_gpu(def_h,def_gpu);
// The deformation field is squared
unsigned int squaringNumber = (unsigned int)fabs(cpp_h->intent_p1);
for(unsigned int i=0;i<squaringNumber;++i){
// The deformation field arrays are updated
NR_CUDA_SAFE_CALL(cudaMemcpy(tempDef_gpu,*def_gpu,voxelNumber*sizeof(float4),cudaMemcpyDeviceToDevice))
// The deformation fields are composed
reg_defField_compose_gpu(def_h,
&tempDef_gpu,
def_gpu,
&mask_gpu,
voxelNumber);
}
NR_CUDA_SAFE_CALL(cudaFree(tempDef_gpu))
NR_CUDA_SAFE_CALL(cudaFree(mask_gpu))
}
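/* Editor's note (not part of the original source): the function above appears
   to exponentiate the velocity grid by scaling and squaring. With
   s = |intent_p1| the displacement is first divided by 2^s (and negated for a
   backward field), converted back to a deformation, and then composed with
   itself s times, so the returned field approximates exp(v). Example with
   s = 2: u <- u/4, then phi <- phi o phi twice, i.e. (id + u/4) applied four
   times. */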
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_compose_gpu(nifti_image *def,
float4 **def_gpu,
float4 **defOut_gpu,
int **mask_gpu,
int activeVoxel)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int voxelNumber=def->nx*def->ny*def->nz;
// Bind the qform or sform
mat44 temp_mat=def->qto_ijk;
if(def->sform_code>0) temp_mat=def->sto_ijk;
float4 temp;
temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
temp_mat=def->qto_xyz;
if(def->sform_code>0) temp_mat=def->sto_xyz;
temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0c,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1c,&temp,sizeof(float4)))
temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2c,&temp,sizeof(float4)))
const int3 referenceImageDim=make_int3(def->nx,def->ny,def->nz);
NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDeformationTexture,*def_gpu,activeVoxel*sizeof(float4)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,maskTexture,*mask_gpu,activeVoxel*sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
if(def->nz>1){
const unsigned int Grid_reg_defField_compose3D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose3D)));
dim3 G1(Grid_reg_defField_compose3D,Grid_reg_defField_compose3D,1);
dim3 B1(NR_BLOCK->Block_reg_defField_compose3D,1,1);
reg_defField_compose3D_kernel<<< G1, B1>>>(*defOut_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
else{
const unsigned int Grid_reg_defField_compose2D =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_compose2D)));
dim3 G1(Grid_reg_defField_compose2D,Grid_reg_defField_compose2D,1);
dim3 B1(NR_BLOCK->Block_reg_defField_compose2D,1,1);
reg_defField_compose2D_kernel<<< G1, B1>>>(*defOut_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
}
NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDeformationTexture))
NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_getJacobianMatrix_gpu(nifti_image *deformationField,
float4 **deformationField_gpu,
float **jacobianMatrices_gpu)
{
// Get the BlockSize - The values have been set in _reg_common_gpu.h - cudaCommon_setCUDACard
NiftyReg_CudaBlock100 *NR_BLOCK = NiftyReg_CudaBlock::getInstance(0);
const int3 referenceDim=make_int3(deformationField->nx,deformationField->ny,deformationField->nz);
const float3 referenceSpacing=make_float3(deformationField->dx,deformationField->dy,deformationField->dz);
const int voxelNumber = referenceDim.x*referenceDim.y*referenceDim.z;
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceDim,sizeof(int3)))
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceSpacing,&referenceSpacing,sizeof(float3)))
mat33 reorientation;
if(deformationField->sform_code>0)
reorientation=reg_mat44_to_mat33(&deformationField->sto_xyz);
else reorientation=reg_mat44_to_mat33(&deformationField->qto_xyz);
float3 temp=make_float3(reorientation.m[0][0],reorientation.m[0][1],reorientation.m[0][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[1][0],reorientation.m[1][1],reorientation.m[1][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
temp=make_float3(reorientation.m[2][0],reorientation.m[2][1],reorientation.m[2][2]);
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))
NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDeformationTexture,*deformationField_gpu,voxelNumber*sizeof(float4)))
const unsigned int Grid_reg_defField_getJacobianMatrix =
(unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(NR_BLOCK->Block_reg_defField_getJacobianMatrix)));
dim3 G1(Grid_reg_defField_getJacobianMatrix,Grid_reg_defField_getJacobianMatrix,1);
dim3 B1(NR_BLOCK->Block_reg_defField_getJacobianMatrix);
reg_defField_getJacobianMatrix3D_kernel<<<G1,B1>>>(*jacobianMatrices_gpu);
NR_CUDA_CHECK_KERNEL(G1,B1)
NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDeformationTexture))
}
/* *************************************************************** */
/* *************************************************************** */
#endif
|
2c1a8d9e350341e38445e056d8c12d99b849a4b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<iostream>
using namespace std;
/* a sum reduction on the array of floats 'in'.
* The reduction result is written to the
* address 'result'. The number of elements to
* be reduced is given by 'size'
*
* The example contains data races because barrier
* synchronisation statements, of the form:
* __syncthreads();
* are missing.
*
* Can you add them to eliminate all data races?
*/
#define N 8 /* Same as blockDim */
#define tid threadIdx.x
__global__ void reduce(int *in, int *result, int size) {
__shared__ int partial_sums[N];
/* Each thread sums elements
in[tid], in[tid + N], in[tid + 2*N], ...
*/
partial_sums[tid] = in[tid];
for(int i = tid + N; i < size; i += N) {
partial_sums[tid] += in[i];
}
/* Tree reduction computes final sum into partial_sums[0] */
for(int d = N/2; d > 0; d >>= 1) {
if(tid < d) {
partial_sums[tid] += partial_sums[tid + d];
}
}
/* Master thread writes out result */
if(tid == 0) {
*result = partial_sums[0];
}
}
| 2c1a8d9e350341e38445e056d8c12d99b849a4b9.cu | #include<stdio.h>
#include<iostream>
using namespace std;
/* a sum reduction on the array of floats 'in'.
* The reduction result is written to the
* address 'result'. The number of elements to
* be reduced is given by 'size'
*
* The example contains data races because barrier
* synchronisation statements, of the form:
* __syncthreads();
* are missing.
*
* Can you add them to eliminate all data races?
*/
#define N 8 /* Same as blockDim */
#define tid threadIdx.x
__global__ void reduce(int *in, int *result, int size) {
__shared__ int partial_sums[N];
/* Each thread sums elements
in[tid], in[tid + N], in[tid + 2*N], ...
*/
partial_sums[tid] = in[tid];
for(int i = tid + N; i < size; i += N) {
partial_sums[tid] += in[i];
}
/* Tree reduction computes final sum into partial_sums[0] */
for(int d = N/2; d > 0; d >>= 1) {
if(tid < d) {
partial_sums[tid] += partial_sums[tid + d];
}
}
/* Master thread writes out result */
if(tid == 0) {
*result = partial_sums[0];
}
}
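/* Editor's sketch (not part of the original exercise file): one possible
 * race-free version of the kernel above. A barrier is needed after the
 * strided accumulation, and after every level of the tree reduction, so that
 * no thread reads a partial sum another thread may still be writing. It
 * reuses the N and tid macros defined at the top of this file. */
__global__ void reduce_fixed(int *in, int *result, int size) {
  __shared__ int partial_sums[N];
  /* Each thread accumulates in[tid], in[tid + N], in[tid + 2*N], ... */
  partial_sums[tid] = in[tid];
  for(int i = tid + N; i < size; i += N) {
    partial_sums[tid] += in[i];
  }
  __syncthreads(); /* all partial sums written before the tree starts */
  for(int d = N/2; d > 0; d >>= 1) {
    if(tid < d) {
      partial_sums[tid] += partial_sums[tid + d];
    }
    __syncthreads(); /* level d complete before level d/2 reads it */
  }
  if(tid == 0) {
    *result = partial_sums[0];
  }
}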
|
ecce4f2c534adeb37f9f5e606bfec310f37448d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <chrono>
#include "sha1.h"
#define BLOCK 4096
#define THREAD 256
#define ITER 1024
void checkCudaError(hipError_t msg, int x)
{
if (msg != hipSuccess) {
fprintf(stderr, "line: %d %s\n", x, hipGetErrorString(msg));
exit(1);
}
return;
}
int main()
{
std::chrono::time_point<std::chrono::system_clock> start, end;
double time;
start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( kernel), dim3(BLOCK), dim3(THREAD), 0, 0, ITER);
hipDeviceSynchronize();
end = std::chrono::system_clock::now();
time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0;
std::cout << "execution time: " << time / 1000.0 << "s." << std::endl;
return 0;
}
| ecce4f2c534adeb37f9f5e606bfec310f37448d9.cu | #include <iostream>
#include <cuda.h>
#include <chrono>
#include "sha1.h"
#define BLOCK 4096
#define THREAD 256
#define ITER 1024
void checkCudaError(cudaError_t msg, int x)
{
if (msg != cudaSuccess) {
fprintf(stderr, "line: %d %s\n", x, cudaGetErrorString(msg));
exit(1);
}
return;
}
int main()
{
std::chrono::time_point<std::chrono::system_clock> start, end;
double time;
start = std::chrono::system_clock::now();
kernel<<<BLOCK, THREAD>>>(ITER);
cudaThreadSynchronize();
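// (editor's note) cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// is the drop-in replacement, and the hipified copy above already maps it to
// hipDeviceSynchronize().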
end = std::chrono::system_clock::now();
time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0;
std::cout << "execution time: " << time / 1000.0 << "s." << std::endl;
return 0;
}
|
eb0a51fae68f1023629c1947e7898a31729848ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
template <typename scalar_t>
__global__ void packed_to_padded_tensor_kernel(
const scalar_t* __restrict__ inputs,
const long* __restrict__ first_idxs,
scalar_t* __restrict__ inputs_padded,
const size_t batch_size,
const size_t max_size,
const size_t num_inputs) {
// Batch elements split evenly across blocks (num blocks = batch_size) and
// values for each element split across threads in the block. Each thread adds
// the values of its respective input elements to the global inputs_padded
// tensor.
const size_t tid = threadIdx.x;
const size_t batch_idx = blockIdx.x;
const long start = first_idxs[batch_idx];
const long end =
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
const int num_faces = end - start;
for (size_t f = tid; f < num_faces; f += blockDim.x) {
inputs_padded[batch_idx * max_size + f] = inputs[start + f];
}
}
at::Tensor packed_to_padded_tensor_cuda(
at::Tensor inputs,
at::Tensor first_idxs,
const long max_size) {
const auto num_inputs = inputs.size(0);
const auto batch_size = first_idxs.size(0);
at::Tensor inputs_padded =
at::zeros({batch_size, max_size}, inputs.options());
const int threads = 512;
const int blocks = batch_size;
AT_DISPATCH_FLOATING_TYPES(
inputs.type(), "packed_to_padded_tensor_kernel", ([&] {
hipLaunchKernelGGL(( packed_to_padded_tensor_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
inputs.data_ptr<scalar_t>(),
first_idxs.data_ptr<long>(),
inputs_padded.data_ptr<scalar_t>(),
batch_size,
max_size,
num_inputs);
}));
return inputs_padded;
}
| eb0a51fae68f1023629c1947e7898a31729848ff.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
template <typename scalar_t>
__global__ void packed_to_padded_tensor_kernel(
const scalar_t* __restrict__ inputs,
const long* __restrict__ first_idxs,
scalar_t* __restrict__ inputs_padded,
const size_t batch_size,
const size_t max_size,
const size_t num_inputs) {
// Batch elements split evenly across blocks (num blocks = batch_size) and
// values for each element split across threads in the block. Each thread adds
// the values of its respective input elements to the global inputs_padded
// tensor.
const size_t tid = threadIdx.x;
const size_t batch_idx = blockIdx.x;
const long start = first_idxs[batch_idx];
const long end =
batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
const int num_faces = end - start;
for (size_t f = tid; f < num_faces; f += blockDim.x) {
inputs_padded[batch_idx * max_size + f] = inputs[start + f];
}
}
at::Tensor packed_to_padded_tensor_cuda(
at::Tensor inputs,
at::Tensor first_idxs,
const long max_size) {
const auto num_inputs = inputs.size(0);
const auto batch_size = first_idxs.size(0);
at::Tensor inputs_padded =
at::zeros({batch_size, max_size}, inputs.options());
const int threads = 512;
const int blocks = batch_size;
AT_DISPATCH_FLOATING_TYPES(
inputs.type(), "packed_to_padded_tensor_kernel", ([&] {
packed_to_padded_tensor_kernel<scalar_t><<<blocks, threads>>>(
inputs.data_ptr<scalar_t>(),
first_idxs.data_ptr<long>(),
inputs_padded.data_ptr<scalar_t>(),
batch_size,
max_size,
num_inputs);
}));
return inputs_padded;
}
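// Editor's note (not part of the original PyTorch3D source): a minimal usage
// sketch, assuming ATen is available and a CUDA device is active; the function
// and variable names are illustrative only. Two batch elements own 3 and 2
// packed values respectively, so first_idxs = [0, 3]; with max_size = 3 the
// second row is zero-padded in its last slot.
at::Tensor packed_to_padded_example() {
  auto float_opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto long_opts = at::TensorOptions().dtype(at::kLong).device(at::kCUDA);
  at::Tensor inputs = at::arange(5, float_opts);         // packed values 0,1,2,3,4
  at::Tensor first_idxs = at::arange(2, long_opts) * 3;  // start offsets [0, 3]
  // Expected result: [[0, 1, 2], [3, 4, 0]]
  return packed_to_padded_tensor_cuda(inputs, first_idxs, /*max_size=*/3);
}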
|
c53df6553fccdd60d2cb2dac3cd8f462ff8e58cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 100000
__global__ void kernel_add(int a, int b, int *c){
*c = a + b;
}
int main(int argc, char **argv){
int* host_a = (int*) malloc(sizeof(int));
int* host_b = (int*) malloc(sizeof(int));
int* host_c = (int*) malloc(sizeof(int));
int* device_c;
hipMalloc((void**) &device_c, sizeof(int));
for(int i = 0; i < N; i++){
*host_a = 2;
*host_b = 7;
hipLaunchKernelGGL(( kernel_add), dim3(1),dim3(1), 0, 0, *host_a, *host_b, device_c);
hipMemcpy(host_c, device_c, sizeof(int), hipMemcpyDeviceToHost);
}
hipFree(device_c);
printf("%d\n", *host_c);
return 0;
}
| c53df6553fccdd60d2cb2dac3cd8f462ff8e58cc.cu | #include <cuda.h>
#include <stdio.h>
#define N 100000
__global__ void kernel_add(int a, int b, int *c){
*c = a + b;
}
int main(int argc, char **argv){
int* host_a = (int*) malloc(sizeof(int));
int* host_b = (int*) malloc(sizeof(int));
int* host_c = (int*) malloc(sizeof(int));
int* device_c;
cudaMalloc((void**) &device_c, sizeof(int));
for(int i = 0; i < N; i++){
*host_a = 2;
*host_b = 7;
kernel_add<<<1,1>>>(*host_a, *host_b, device_c);
cudaMemcpy(host_c, device_c, sizeof(int), cudaMemcpyDeviceToHost);
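// (editor's note) this device-to-host cudaMemcpy is blocking, so each
// iteration waits for its kernel launch to finish before the next one starts.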
}
cudaFree(device_c);
printf("%d\n", *host_c);
return 0;
}
|
e19a753e8bb31c6815b8cd687c5c6db7c2d3aab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****Implemented first layer of convolution using global memory*******/
/**Implemented First Maxpool Layer**/
/**Measuring time**/
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
// #include<time.h>
#include<float.h>
__constant__ int FIL[32*5*5];
__global__ void conv1(unsigned int *pich, int *resulth, int xsize, int numfilters, int filterdim){
int i,j,k,l;
int sum;
int height;
i = threadIdx.y;
j = threadIdx.x;
l = blockIdx.x;
k=0;
sum =0;
// height = blockIdx.x*(xsize -filterdim +1)*(xsize -filterdim +1);
if(i<(xsize -filterdim +1)&& j<(xsize -filterdim +1)){
sum = (FIL[l*(filterdim*filterdim) + k])*pich[ xsize * (i) + j ] + (FIL[l*(filterdim*filterdim) + k+1])*pich[ xsize*(i) + (j+1) ]
+ FIL[l*(filterdim*filterdim)+ k+2]*pich[ xsize * (i)+(j+2)] + FIL[l*(filterdim*filterdim) +k+3]*pich[xsize * (i)+(j+3)]
+ FIL[l*(filterdim*filterdim) +k+4]*pich[ xsize * (i)+(j+4)]+ FIL[l*(filterdim*filterdim) + k+5]*pich[ xsize*(i+1)+(j) ]
+ FIL[l*(filterdim*filterdim) +k+6]*pich[ xsize * (i+1) + (j+1) ] + FIL[l*(filterdim*filterdim) + k+7]*pich[ xsize*(i+1) + (j+2) ] +
FIL[l*(filterdim*filterdim) +k+8]*pich[ xsize*(i+1) + (j+3) ] + FIL[l*(filterdim*filterdim) +k+9]*pich[ xsize*(i+1) + (j+4) ] +
FIL[l*(filterdim*filterdim) +k+10]*pich[ xsize*(i+2) + (j) ] + FIL[l*(filterdim*filterdim) +k+11]*pich[ xsize * (i+2) + (j+1) ] +
FIL[l*(filterdim*filterdim) +k+12]*pich[ xsize*(i+2) + (j+2)] + FIL[l*(filterdim*filterdim) +k+13]*pich[ xsize*(i+2) + (j+3)]
+FIL[l*(filterdim*filterdim) +k+14]*pich[ xsize*(i+2) + (j+4)] + FIL[l*(filterdim*filterdim) +k+15]*pich[ xsize*(i+3) + (j)]
+ FIL[l*(filterdim*filterdim) +k+16]*pich[ xsize*(i+3) + (j+1)] + FIL[l*(filterdim*filterdim) +k+17]*pich[ xsize*(i+3) + (j+2)]
+ FIL[l*(filterdim*filterdim) +k+18]*pich[ xsize*(i+3) + (j+3)] + FIL[l*(filterdim*filterdim) +k+19]*pich[ xsize*(i+3) + (j+4)]
+ FIL[l*(filterdim*filterdim) +k+20]*pich[ xsize*(i+4) + (j)] +FIL[l*(filterdim*filterdim) +k+21]*pich[ xsize*(i+4) + (j+1)]
+ FIL[l*(filterdim*filterdim) +k+22]*pich[ xsize*(i+4) + (j+2)] + FIL[l*(filterdim*filterdim) +k+23]*pich[ xsize*(i+4) + (j+3)]
+ FIL[l*(filterdim*filterdim) + k+24]*pich[ xsize*(i+4) + (j+4)];
resulth[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j] = sum;
printf("resultgpu[%d][%d]=%d\n",l,i*(xsize - filterdim +1)+j,resulth[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j]);
}
}
__global__ void maxpooling(int *resulth, int *maxpoolh, int xsize, int filterdim, int numfilters){
int i,j,l;
int temp;
i = threadIdx.y;
j = threadIdx.x;
l = blockIdx.x;
if(i<((xsize-filterdim+1)/2)&&(j<((xsize-filterdim+1)/2))){
int a,b,c,d,index, max1, max2;
index = l*((xsize -filterdim +1)*(xsize -filterdim +1))+ threadIdx.x*2 + threadIdx.y*2*(xsize -filterdim +1);
a = resulth[index];
b = resulth[index +1];
c = resulth[index+(xsize-filterdim+1)];
d = resulth[index + (xsize-filterdim+2)];
if(a>b){
max1 = a;
}
else{
max1 = b;
}
if(c>d){
max2 = c;
}
else{
max2 = d;
}
if(max1>max2){
maxpoolh[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j]=max1;
}
else{
maxpoolh[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j] = max2;
}
}
}
int main( int argc, char **argv )
{
int xsize;
int filterdim;
int numfilters;
xsize = 28;
filterdim = 5;
numfilters =32;
int numbytes = xsize*xsize*sizeof(int);
int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int);
/**Numbytes required for output of first maxpool layer**/
int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int);
unsigned int *pic = (unsigned int *)malloc(numbytes);
unsigned int filter[numfilters*filterdim*filterdim];
int *result;
int *maxpool;
result = (int *)malloc(numfilters*numbytes2);
maxpool = (int *)malloc(numfilters*numbytes3);
unsigned int *pich;
int *resulth;
int *maxpoolh;
hipMalloc(&pich, numbytes);
hipMalloc(&resulth, numfilters*numbytes2);
hipMalloc(&maxpoolh, numfilters*numbytes3);
int i,j,k,l,count,dimx;
for (i=0; i<xsize; i++) {
for (j=0; j<xsize; j++) {
pic[i*xsize + j] = 1;
//printf("pic[%d][%d] : %d\t",i,j,pic[i*xsize + j]);
}
// printf("\n");
}
for(int k=0;k<numfilters;k++){
for (int i=0; i<filterdim; i++) {
for (int j=0; j<filterdim; j++){
filter[k*(filterdim*filterdim) + i*filterdim + j] = 1;
// printf("filter[%d][%d]: %d\n",k, i*filterdim + j, filter[k*(filterdim*filterdim) + i*filterdim + j]);
}
}
}
// int blocksize, gridsize;
dim3 dimGrid (32);
dim3 dimBlock (32,32);
// gridsize = numfilters;
// blocksize = (24,24);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipMemcpy(pich,pic,numbytes, hipMemcpyHostToDevice);
hipMemcpyToSymbol(FIL, filter, numfilters*filterdim*filterdim*sizeof(int));
hipLaunchKernelGGL(( conv1), dim3(dimGrid), dim3(dimBlock), 0, 0, pich, resulth, xsize, numfilters, filterdim);
hipMemcpy(result,resulth,numfilters*numbytes2,hipMemcpyDeviceToHost);
dim3 dimBlock1 (16,16);
hipMemcpy(resulth, result,numfilters*numbytes2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( maxpooling), dim3(dimGrid), dim3(dimBlock1), 0, 0, resulth, maxpoolh, xsize, filterdim, numfilters);
hipMemcpy(maxpool, maxpoolh, numfilters*numbytes3, hipMemcpyDeviceToHost);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float time = 0;
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time taken on GPU: %f ms\n", time);
}
| e19a753e8bb31c6815b8cd687c5c6db7c2d3aab0.cu | /*****Implemented first layer of convolution using global memory*******/
/**Implemented First Maxpool Layer**/
/**Measuring time**/
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
// #include<time.h>
#include<float.h>
__constant__ int FIL[32*5*5];
__global__ void conv1(unsigned int *pich, int *resulth, int xsize, int numfilters, int filterdim){
int i,j,k,l;
int sum;
int height;
i = threadIdx.y;
j = threadIdx.x;
l = blockIdx.x;
k=0;
sum =0;
// height = blockIdx.x*(xsize -filterdim +1)*(xsize -filterdim +1);
if(i<(xsize -filterdim +1)&& j<(xsize -filterdim +1)){
sum = (FIL[l*(filterdim*filterdim) + k])*pich[ xsize * (i) + j ] + (FIL[l*(filterdim*filterdim) + k+1])*pich[ xsize*(i) + (j+1) ]
+ FIL[l*(filterdim*filterdim)+ k+2]*pich[ xsize * (i)+(j+2)] + FIL[l*(filterdim*filterdim) +k+3]*pich[xsize * (i)+(j+3)]
+ FIL[l*(filterdim*filterdim) +k+4]*pich[ xsize * (i)+(j+4)]+ FIL[l*(filterdim*filterdim) + k+5]*pich[ xsize*(i+1)+(j) ]
+ FIL[l*(filterdim*filterdim) +k+6]*pich[ xsize * (i+1) + (j+1) ] + FIL[l*(filterdim*filterdim) + k+7]*pich[ xsize*(i+1) + (j+2) ] +
FIL[l*(filterdim*filterdim) +k+8]*pich[ xsize*(i+1) + (j+3) ] + FIL[l*(filterdim*filterdim) +k+9]*pich[ xsize*(i+1) + (j+4) ] +
FIL[l*(filterdim*filterdim) +k+10]*pich[ xsize*(i+2) + (j) ] + FIL[l*(filterdim*filterdim) +k+11]*pich[ xsize * (i+2) + (j+1) ] +
FIL[l*(filterdim*filterdim) +k+12]*pich[ xsize*(i+2) + (j+2)] + FIL[l*(filterdim*filterdim) +k+13]*pich[ xsize*(i+2) + (j+3)]
+FIL[l*(filterdim*filterdim) +k+14]*pich[ xsize*(i+2) + (j+4)] + FIL[l*(filterdim*filterdim) +k+15]*pich[ xsize*(i+3) + (j)]
+ FIL[l*(filterdim*filterdim) +k+16]*pich[ xsize*(i+3) + (j+1)] + FIL[l*(filterdim*filterdim) +k+17]*pich[ xsize*(i+3) + (j+2)]
+ FIL[l*(filterdim*filterdim) +k+18]*pich[ xsize*(i+3) + (j+3)] + FIL[l*(filterdim*filterdim) +k+19]*pich[ xsize*(i+3) + (j+4)]
+ FIL[l*(filterdim*filterdim) +k+20]*pich[ xsize*(i+4) + (j)] +FIL[l*(filterdim*filterdim) +k+21]*pich[ xsize*(i+4) + (j+1)]
+ FIL[l*(filterdim*filterdim) +k+22]*pich[ xsize*(i+4) + (j+2)] + FIL[l*(filterdim*filterdim) +k+23]*pich[ xsize*(i+4) + (j+3)]
+ FIL[l*(filterdim*filterdim) + k+24]*pich[ xsize*(i+4) + (j+4)];
resulth[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j] = sum;
printf("resultgpu[%d][%d]=%d\n",l,i*(xsize - filterdim +1)+j,resulth[l*(xsize -filterdim +1)*(xsize -filterdim +1) + i*(xsize - filterdim +1)+j]);
}
}
__global__ void maxpooling(int *resulth, int *maxpoolh, int xsize, int filterdim, int numfilters){
int i,j,l;
int temp;
i = threadIdx.y;
j = threadIdx.x;
l = blockIdx.x;
if(i<((xsize-filterdim+1)/2)&&(j<((xsize-filterdim+1)/2))){
int a,b,c,d,index, max1, max2;
index = l*((xsize -filterdim +1)*(xsize -filterdim +1))+ threadIdx.x*2 + threadIdx.y*2*(xsize -filterdim +1);
a = resulth[index];
b = resulth[index +1];
c = resulth[index+(xsize-filterdim+1)];
d = resulth[index + (xsize-filterdim+2)];
if(a>b){
max1 = a;
}
else{
max1 = b;
}
if(c>d){
max2 = c;
}
else{
max2 = d;
}
if(max1>max2){
maxpoolh[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j]=max1;
}
else{
maxpoolh[l*(xsize -filterdim +1)*(xsize -filterdim +1)/4 + i*(xsize - filterdim +1)/2+j] = max2;
}
}
}
int main( int argc, char **argv )
{
int xsize;
int filterdim;
int numfilters;
xsize = 28;
filterdim = 5;
numfilters =32;
int numbytes = xsize*xsize*sizeof(int);
int numbytes2 = (xsize-filterdim+1)*(xsize-filterdim+1)*sizeof(int);
/**Numbytes required for output of first maxpool layer**/
int numbytes3 = ((xsize-filterdim+1)*(xsize-filterdim+1)/4)*sizeof(int);
unsigned int *pic = (unsigned int *)malloc(numbytes);
unsigned int filter[numfilters*filterdim*filterdim];
int *result;
int *maxpool;
result = (int *)malloc(numfilters*numbytes2);
maxpool = (int *)malloc(numfilters*numbytes3);
unsigned int *pich;
int *resulth;
int *maxpoolh;
cudaMalloc(&pich, numbytes);
cudaMalloc(&resulth, numfilters*numbytes2);
cudaMalloc(&maxpoolh, numfilters*numbytes3);
int i,j,k,l,count,dimx;
for (i=0; i<xsize; i++) {
for (j=0; j<xsize; j++) {
pic[i*xsize + j] = 1;
//printf("pic[%d][%d] : %d\t",i,j,pic[i*xsize + j]);
}
// printf("\n");
}
for(int k=0;k<numfilters;k++){
for (int i=0; i<filterdim; i++) {
for (int j=0; j<filterdim; j++){
filter[k*(filterdim*filterdim) + i*filterdim + j] = 1;
// printf("filter[%d][%d]: %d\n",k, i*filterdim + j, filter[k*(filterdim*filterdim) + i*filterdim + j]);
}
}
}
// int blocksize, gridsize;
dim3 dimGrid (32);
dim3 dimBlock (32,32);
// gridsize = numfilters;
// blocksize = (24,24);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaMemcpy(pich,pic,numbytes, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(FIL, filter, numfilters*filterdim*filterdim*sizeof(int));
conv1<<<dimGrid, dimBlock>>>(pich, resulth, xsize, numfilters, filterdim);
cudaMemcpy(result,resulth,numfilters*numbytes2,cudaMemcpyDeviceToHost);
dim3 dimBlock1 (16,16);
cudaMemcpy(resulth, result,numfilters*numbytes2, cudaMemcpyHostToDevice);
maxpooling<<<dimGrid, dimBlock1>>>(resulth, maxpoolh, xsize, filterdim, numfilters);
cudaMemcpy(maxpool, maxpoolh, numfilters*numbytes3, cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time taken on GPU: %f ms\n", time);
}
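/* Editor's sketch (not part of the original file): a loop-based kernel that is
 * intended to compute the same quantity as the unrolled conv1 above, written
 * out to clarify the index arithmetic. For xsize = 28 and filterdim = 5 the
 * output is (28 - 5 + 1) x (28 - 5 + 1) = 24 x 24 per filter, and the 2x2
 * max-pool then yields 12 x 12 per filter. Untested sketch; it reuses the
 * __constant__ FIL array declared at the top of this file. */
__global__ void conv1_loop(unsigned int *pich, int *resulth, int xsize, int filterdim){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int l = blockIdx.x;
    int odim = xsize - filterdim + 1;
    if(i < odim && j < odim){
        int sum = 0;
        for(int fi = 0; fi < filterdim; fi++){
            for(int fj = 0; fj < filterdim; fj++){
                sum += FIL[l*filterdim*filterdim + fi*filterdim + fj]
                     * pich[(i + fi)*xsize + (j + fj)];
            }
        }
        resulth[l*odim*odim + i*odim + j] = sum;
    }
}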
|
fba12f83d0a66098ed0e791f001f37947250d760.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
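/* Editor's note (not part of the generated source): the timing macros above
   are meant to bracket a host-side operation, e.g. (placeholder names):
       timeStartMeasure();
       cudaMalloc(&device_ptr, num_bytes);
       timeReportMeasure(program_result, allocate_memory);
   which accumulates the elapsed microseconds into
   program_result->time_allocate_memory. */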
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_1(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_3(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_5 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_5(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_4 {
// Ikra::Symbolic::ArrayStencilCommand
int *result;
array_command_3 *input_0;
array_command_5 *input_1;
__host__ __device__ array_command_4(int *result = NULL, array_command_3 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { }
};
struct environment_struct
{
};
// TODO: There should be a better to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_117 = ((indices.field_1 % 4));
(_temp_var_117 == 0 ? indices.field_0 : (_temp_var_117 == 1 ? indices.field_1 : (_temp_var_117 == 2 ? indices.field_2 : (_temp_var_117 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_197(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_118 = ((indices.field_1 % 4));
(_temp_var_118 == 0 ? indices.field_0 : (_temp_var_118 == 1 ? indices.field_1 : (_temp_var_118 == 2 ? indices.field_2 : (_temp_var_118 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_199(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_201(environment_t *_env_, int _num_threads_, int *_result_, int *_array_203_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_203_[_tid_];
}
}
__global__ void kernel_206(environment_t *_env_, int _num_threads_, int *_result_, int *_array_208_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_208_[_tid_];
}
}
// TODO: There should be a better to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_121 = ((({ int _temp_var_122 = ((({ int _temp_var_123 = ((values[2] % 4));
(_temp_var_123 == 0 ? indices.field_0 : (_temp_var_123 == 1 ? indices.field_1 : (_temp_var_123 == 2 ? indices.field_2 : (_temp_var_123 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_122 == 0 ? indices.field_0 : (_temp_var_122 == 1 ? indices.field_1 : (_temp_var_122 == 2 ? indices.field_2 : (_temp_var_122 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_121 == 0 ? indices.field_0 : (_temp_var_121 == 1 ? indices.field_1 : (_temp_var_121 == 2 ? indices.field_2 : (_temp_var_121 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_204(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_207)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_209;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
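        // Stencil neighbourhood: offsets (-1,0,0,0), (0,0,0,0), (1,0,0,0) and
        // (-1,-1,0,0), i.e. each cell combines its own value with the cells at
        // -1/+1 in dimension 0 and at (-1,-1) in dimensions (0,1). Cells whose
        // neighbourhood would leave the grid fall through to the default value 37.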
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_209 = _block_k_4_(_env_, _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_209 = 37;
}
_result_[_tid_] = temp_stencil_209;
}
}
__global__ void kernel_210(environment_t *_env_, int _num_threads_, int *_result_, int *_array_212_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_212_[_tid_];
}
}
__global__ void kernel_215(environment_t *_env_, int _num_threads_, int *_result_, int *_array_217_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_217_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_126 = ((({ int _temp_var_127 = ((({ int _temp_var_128 = ((values[2] % 4));
(_temp_var_128 == 0 ? indices.field_0 : (_temp_var_128 == 1 ? indices.field_1 : (_temp_var_128 == 2 ? indices.field_2 : (_temp_var_128 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_127 == 0 ? indices.field_0 : (_temp_var_127 == 1 ? indices.field_1 : (_temp_var_127 == 2 ? indices.field_2 : (_temp_var_127 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_126 == 0 ? indices.field_0 : (_temp_var_126 == 1 ? indices.field_1 : (_temp_var_126 == 2 ? indices.field_2 : (_temp_var_126 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_213(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_216)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_218;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_218 = _block_k_4_(_env_, _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_218 = 37;
}
_result_[_tid_] = temp_stencil_218;
}
}
__global__ void kernel_219(environment_t *_env_, int _num_threads_, int *_result_, int *_array_221_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_221_[_tid_];
}
}
__global__ void kernel_224(environment_t *_env_, int _num_threads_, int *_result_, int *_array_226_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_226_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_131 = ((({ int _temp_var_132 = ((({ int _temp_var_133 = ((values[2] % 4));
(_temp_var_133 == 0 ? indices.field_0 : (_temp_var_133 == 1 ? indices.field_1 : (_temp_var_133 == 2 ? indices.field_2 : (_temp_var_133 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_132 == 0 ? indices.field_0 : (_temp_var_132 == 1 ? indices.field_1 : (_temp_var_132 == 2 ? indices.field_2 : (_temp_var_132 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_131 == 0 ? indices.field_0 : (_temp_var_131 == 1 ? indices.field_1 : (_temp_var_131 == 2 ? indices.field_2 : (_temp_var_131 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_222(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_225)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_227;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_227 = _block_k_4_(_env_, _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_227 = 37;
}
_result_[_tid_] = temp_stencil_227;
}
}
__global__ void kernel_228(environment_t *_env_, int _num_threads_, int *_result_, int *_array_230_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_230_[_tid_];
}
}
__global__ void kernel_233(environment_t *_env_, int _num_threads_, int *_result_, int *_array_235_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_235_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_136 = ((({ int _temp_var_137 = ((({ int _temp_var_138 = ((values[2] % 4));
(_temp_var_138 == 0 ? indices.field_0 : (_temp_var_138 == 1 ? indices.field_1 : (_temp_var_138 == 2 ? indices.field_2 : (_temp_var_138 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_137 == 0 ? indices.field_0 : (_temp_var_137 == 1 ? indices.field_1 : (_temp_var_137 == 2 ? indices.field_2 : (_temp_var_137 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_136 == 0 ? indices.field_0 : (_temp_var_136 == 1 ? indices.field_1 : (_temp_var_136 == 2 ? indices.field_2 : (_temp_var_136 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_231(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_234)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_236;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_236 = _block_k_4_(_env_, _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_236 = 37;
}
_result_[_tid_] = temp_stencil_236;
}
}
__global__ void kernel_237(environment_t *_env_, int _num_threads_, int *_result_, int *_array_239_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_239_[_tid_];
}
}
__global__ void kernel_242(environment_t *_env_, int _num_threads_, int *_result_, int *_array_244_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_244_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_143 = ((({ int _temp_var_144 = ((({ int _temp_var_145 = ((values[2] % 4));
(_temp_var_145 == 0 ? indices.field_0 : (_temp_var_145 == 1 ? indices.field_1 : (_temp_var_145 == 2 ? indices.field_2 : (_temp_var_145 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_144 == 0 ? indices.field_0 : (_temp_var_144 == 1 ? indices.field_1 : (_temp_var_144 == 2 ? indices.field_2 : (_temp_var_144 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_143 == 0 ? indices.field_0 : (_temp_var_143 == 1 ? indices.field_1 : (_temp_var_143 == 2 ? indices.field_2 : (_temp_var_143 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_240(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_243)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_245;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_245 = _block_k_4_(_env_, _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_245 = 37;
}
_result_[_tid_] = temp_stencil_245;
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return variable_size_array_t::error_return_value;\
}
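// Note: the assignment inside the condition above is intentional -- the HIP status
// code is stored in result_var->last_error, and any non-zero status aborts the host
// section with variable_size_array_t::error_return_value, e.g.
//   checkErrorReturn(program_result, hipMalloc(&ptr, bytes));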
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * x = new array_command_2();
union_t _ssa_var_old_old_data_3;
array_command_4 * _ssa_var_y_6;
union_t _ssa_var_old_data_5;
union_t _ssa_var_old_old_data_4;
int r;
union_t _ssa_var_old_data_2;
union_t _ssa_var_y_1;
{
_ssa_var_y_1 = union_t(10, union_v_t::from_pointer((void *) new array_command_3(NULL, ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]
array_command_2 * cmd = x;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_198;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_198, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_198);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_197), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_198);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_198;
}
variable_size_array_t((void *) cmd->result, 10000000);
}))));
_ssa_var_old_data_2 = union_t(19, union_v_t::from_pointer((void *) x));
_ssa_var_old_old_data_3 = union_t(19, union_v_t::from_pointer((void *) x));
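        // 200 iterations: each step wraps the previous result in a stencil command and,
        // once r > 1, frees the device buffer that is now two generations old.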
for (r = 0; r <= (200 - 1); r++)
{
_ssa_var_old_old_data_4 = _ssa_var_old_data_2;
_ssa_var_old_data_5 = _ssa_var_y_1;
_ssa_var_y_6 = new array_command_4(NULL, new array_command_3(NULL, ({
variable_size_array_t _polytemp_result_119;
{
union_t _polytemp_expr_120 = _ssa_var_y_1;
switch (_polytemp_expr_120.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_119 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_120.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_202;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_202, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_202);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_201), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_202, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_202;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_119 = ({
// [Ikra::Symbolic::ArrayStencilCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pstencil([ArrayNode: [[ArrayNode: [<-1>, <0>, <0>, <0>]], [ArrayNode: [<0>, <0>, <0>, <0>]], [ArrayNode: [<1>, <0>, <0>, <0>]], [ArrayNode: [<-1>, <-1>, <0>, <0>]]]]; <37>; [HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_120.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_207;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_207, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_207);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_206), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_207, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_205;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_205, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_205);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_204), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_205, _kernel_result_207);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_205;
timeStartMeasure();
if (_kernel_result_207 != cmd->result) {
// Don't free memory if it is the result. There is already a similar check in
// program_builder (free all except for last). However, this check is not sufficient in
// case the same array is reused!
checkErrorReturn(program_result, hipFree(_kernel_result_207));
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
_kernel_result_207),
program_result->device_allocations->end());
}
timeReportMeasure(program_result, free_memory);
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_119;
})));
if (((r > 1)))
{
({
bool _polytemp_result_139;
{
union_t _polytemp_expr_140 = _ssa_var_old_old_data_4;
switch (_polytemp_expr_140.class_id)
{
case 19: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_2 * cmd_to_free = (array_command_2 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, hipFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_3 * cmd_to_free = (array_command_3 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, hipFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_4 * cmd_to_free = (array_command_4 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, hipFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
}
}
_polytemp_result_139;
});
}
else
{
}
_ssa_var_y_1 = union_t(20, union_v_t::from_pointer((void *) _ssa_var_y_6));
_ssa_var_old_data_2 = _ssa_var_old_data_5;
_ssa_var_old_old_data_3 = _ssa_var_old_old_data_4;
}
r--;
return ({
variable_size_array_t _polytemp_result_141;
{
union_t _polytemp_expr_142 = _ssa_var_y_1;
switch (_polytemp_expr_142.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_141 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_142.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_238;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_238, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_238);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_237), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_238, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_238;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_141 = ({
// [Ikra::Symbolic::ArrayStencilCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pstencil([ArrayNode: [[ArrayNode: [<-1>, <0>, <0>, <0>]], [ArrayNode: [<0>, <0>, <0>, <0>]], [ArrayNode: [<1>, <0>, <0>, <0>]], [ArrayNode: [<-1>, <-1>, <0>, <0>]]]]; <37>; [HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_142.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_243;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_243, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_243);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_242), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_243, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_241;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_241, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_241);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_240), dim3(39063), dim3(256), 0, 0, dev_env, 10000000, _kernel_result_241, _kernel_result_243);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_241;
timeStartMeasure();
if (_kernel_result_243 != cmd->result) {
// Don't free memory if it is the result. There is already a similar check in
// program_builder (free all except for last). However, this check is not sufficient in
// case the same array is reused!
checkErrorReturn(program_result, hipFree(_kernel_result_243));
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
_kernel_result_243),
program_result->device_allocations->end());
}
timeReportMeasure(program_result, free_memory);
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_141;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
expr
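// Inside launch_kernel the error-returning variant above cannot be used (the function
// returns result_t*, not variable_size_array_t), so the macro is reduced to a plain
// passthrough that simply evaluates the expression.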
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, hipFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, hipMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(dev_env, host_env, sizeof(environment_t), hipMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, hipMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, hipFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
| fba12f83d0a66098ed0e791f001f37947250d760.cu | #include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
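// timeStartMeasure()/timeReportMeasure(result, name) bracket a region and add the
// elapsed wall-clock time in microseconds to result->time_<name>, e.g.
//   timeStartMeasure();
//   /* ... region being measured ... */
//   timeReportMeasure(program_result, kernel);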
struct indexed_struct_4_lt_int_int_int_int_gt_t
{
int field_0;
int field_1;
int field_2;
int field_3;
};
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
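// union_t pairs a value with a class_id tag; the host section below dispatches on this
// tag in its switch statements (10 = FixedSizeArrayInHostSectionCommand,
// 19 = ArrayCombineCommand, 20 = ArrayStencilCommand).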
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct array_command_1 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_1(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_2 {
// Ikra::Symbolic::ArrayCombineCommand
int *result;
array_command_1 *input_0;
__host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { }
};
struct array_command_3 {
// Ikra::Symbolic::FixedSizeArrayInHostSectionCommand
int *result;
variable_size_array_t input_0;
__host__ __device__ array_command_3(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { }
int size() { return input_0.size; }
};
struct array_command_5 {
// Ikra::Symbolic::ArrayIndexCommand
indexed_struct_4_lt_int_int_int_int_gt_t *result;
__host__ __device__ array_command_5(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { }
};
struct array_command_4 {
// Ikra::Symbolic::ArrayStencilCommand
int *result;
array_command_3 *input_0;
array_command_5 *input_1;
__host__ __device__ array_command_4(int *result = NULL, array_command_3 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { }
};
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_117 = ((indices.field_1 % 4));
(_temp_var_117 == 0 ? indices.field_0 : (_temp_var_117 == 1 ? indices.field_1 : (_temp_var_117 == 2 ? indices.field_2 : (_temp_var_117 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_197(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
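        // The flat thread id maps onto a 20 x 500 x 500 x 2 index space
        // (10,000,000 cells) via the divisions and modulos below.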
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
// TODO: There should be a better to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
{
return (((indices.field_0 + indices.field_1)) % ((((indices.field_3 + ({ int _temp_var_118 = ((indices.field_1 % 4));
(_temp_var_118 == 0 ? indices.field_0 : (_temp_var_118 == 1 ? indices.field_1 : (_temp_var_118 == 2 ? indices.field_2 : (_temp_var_118 == 3 ? indices.field_3 : NULL)))); }))) + 7)));
}
}
#endif
__global__ void kernel_199(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
}
__global__ void kernel_201(environment_t *_env_, int _num_threads_, int *_result_, int *_array_203_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_203_[_tid_];
}
}
__global__ void kernel_206(environment_t *_env_, int _num_threads_, int *_result_, int *_array_208_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_208_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_121 = ((({ int _temp_var_122 = ((({ int _temp_var_123 = ((values[2] % 4));
(_temp_var_123 == 0 ? indices.field_0 : (_temp_var_123 == 1 ? indices.field_1 : (_temp_var_123 == 2 ? indices.field_2 : (_temp_var_123 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_122 == 0 ? indices.field_0 : (_temp_var_122 == 1 ? indices.field_1 : (_temp_var_122 == 2 ? indices.field_2 : (_temp_var_122 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_121 == 0 ? indices.field_0 : (_temp_var_121 == 1 ? indices.field_1 : (_temp_var_121 == 2 ? indices.field_2 : (_temp_var_121 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_204(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_207)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_209;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
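        // Same stencil as declared in the host section's pstencil call:
        // offsets (-1,0,0,0), (0,0,0,0), (1,0,0,0), (-1,-1,0,0), default 37.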
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_209 = _block_k_4_(_env_, _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_207[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_209 = 37;
}
_result_[_tid_] = temp_stencil_209;
}
}
__global__ void kernel_210(environment_t *_env_, int _num_threads_, int *_result_, int *_array_212_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_212_[_tid_];
}
}
__global__ void kernel_215(environment_t *_env_, int _num_threads_, int *_result_, int *_array_217_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_217_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_126 = ((({ int _temp_var_127 = ((({ int _temp_var_128 = ((values[2] % 4));
(_temp_var_128 == 0 ? indices.field_0 : (_temp_var_128 == 1 ? indices.field_1 : (_temp_var_128 == 2 ? indices.field_2 : (_temp_var_128 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_127 == 0 ? indices.field_0 : (_temp_var_127 == 1 ? indices.field_1 : (_temp_var_127 == 2 ? indices.field_2 : (_temp_var_127 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_126 == 0 ? indices.field_0 : (_temp_var_126 == 1 ? indices.field_1 : (_temp_var_126 == 2 ? indices.field_2 : (_temp_var_126 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_213(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_216)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_218;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_218 = _block_k_4_(_env_, _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_216[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_218 = 37;
}
_result_[_tid_] = temp_stencil_218;
}
}
__global__ void kernel_219(environment_t *_env_, int _num_threads_, int *_result_, int *_array_221_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_221_[_tid_];
}
}
__global__ void kernel_224(environment_t *_env_, int _num_threads_, int *_result_, int *_array_226_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_226_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_131 = ((({ int _temp_var_132 = ((({ int _temp_var_133 = ((values[2] % 4));
(_temp_var_133 == 0 ? indices.field_0 : (_temp_var_133 == 1 ? indices.field_1 : (_temp_var_133 == 2 ? indices.field_2 : (_temp_var_133 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_132 == 0 ? indices.field_0 : (_temp_var_132 == 1 ? indices.field_1 : (_temp_var_132 == 2 ? indices.field_2 : (_temp_var_132 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_131 == 0 ? indices.field_0 : (_temp_var_131 == 1 ? indices.field_1 : (_temp_var_131 == 2 ? indices.field_2 : (_temp_var_131 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_222(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_225)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_227;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_227 = _block_k_4_(_env_, _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_225[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_227 = 37;
}
_result_[_tid_] = temp_stencil_227;
}
}
__global__ void kernel_228(environment_t *_env_, int _num_threads_, int *_result_, int *_array_230_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_230_[_tid_];
}
}
__global__ void kernel_233(environment_t *_env_, int _num_threads_, int *_result_, int *_array_235_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_235_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_136 = ((({ int _temp_var_137 = ((({ int _temp_var_138 = ((values[2] % 4));
(_temp_var_138 == 0 ? indices.field_0 : (_temp_var_138 == 1 ? indices.field_1 : (_temp_var_138 == 2 ? indices.field_2 : (_temp_var_138 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_137 == 0 ? indices.field_0 : (_temp_var_137 == 1 ? indices.field_1 : (_temp_var_137 == 2 ? indices.field_2 : (_temp_var_137 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_136 == 0 ? indices.field_0 : (_temp_var_136 == 1 ? indices.field_1 : (_temp_var_136 == 2 ? indices.field_2 : (_temp_var_136 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_231(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_234)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_236;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_236 = _block_k_4_(_env_, _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_234[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_236 = 37;
}
_result_[_tid_] = temp_stencil_236;
}
}
__global__ void kernel_237(environment_t *_env_, int _num_threads_, int *_result_, int *_array_239_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_239_[_tid_];
}
}
__global__ void kernel_242(environment_t *_env_, int _num_threads_, int *_result_, int *_array_244_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _array_244_[_tid_];
}
}
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int _values_0, int _values_1, int _values_2, int _values_3, indexed_struct_4_lt_int_int_int_int_gt_t indices)
{
// (Re)construct array from separately passed parameters
int values[] = { _values_0, _values_1, _values_2, _values_3 };
{
return (((((((values[0] % 938)) + ((values[1] / 97)))) % 97717)) + ((((({ int _temp_var_143 = ((({ int _temp_var_144 = ((({ int _temp_var_145 = ((values[2] % 4));
(_temp_var_145 == 0 ? indices.field_0 : (_temp_var_145 == 1 ? indices.field_1 : (_temp_var_145 == 2 ? indices.field_2 : (_temp_var_145 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_144 == 0 ? indices.field_0 : (_temp_var_144 == 1 ? indices.field_1 : (_temp_var_144 == 2 ? indices.field_2 : (_temp_var_144 == 3 ? indices.field_3 : NULL)))); }) % 4));
(_temp_var_143 == 0 ? indices.field_0 : (_temp_var_143 == 1 ? indices.field_1 : (_temp_var_143 == 2 ? indices.field_2 : (_temp_var_143 == 3 ? indices.field_3 : NULL)))); }) * ((values[3] % 7)))) % 99)));
}
}
#endif
__global__ void kernel_240(environment_t *_env_, int _num_threads_, int *_result_, int *_kernel_result_243)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
int temp_stencil_245;
// Indices for all dimensions
int temp_stencil_dim_0 = _tid_ / 500000;
int temp_stencil_dim_1 = (_tid_ / 1000) % 500;
int temp_stencil_dim_2 = (_tid_ / 2) % 500;
int temp_stencil_dim_3 = (_tid_ / 1) % 2;
if (temp_stencil_dim_0 + -1 >= 0 && temp_stencil_dim_0 + 1 < 20 && temp_stencil_dim_1 + -1 >= 0 && temp_stencil_dim_1 + 0 < 500 && temp_stencil_dim_2 + 0 >= 0 && temp_stencil_dim_2 + 0 < 500 && temp_stencil_dim_3 + 0 >= 0 && temp_stencil_dim_3 + 0 < 2)
{
// All value indices within bounds
temp_stencil_245 = _block_k_4_(_env_, _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + -1) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 0) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + 0) * 1000 + (temp_stencil_dim_0 + 1) * 500000], _kernel_result_243[(temp_stencil_dim_3 + 0) * 1 + (temp_stencil_dim_2 + 0) * 2 + (temp_stencil_dim_1 + -1) * 1000 + (temp_stencil_dim_0 + -1) * 500000], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2}));
}
else
{
// At least one index is out of bounds
temp_stencil_245 = 37;
}
_result_[_tid_] = temp_stencil_245;
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return variable_size_array_t::error_return_value;\
}
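// Note: the assignment inside the condition above is intentional -- any non-zero CUDA
// status is recorded in result_var->last_error and aborts the host section with
// variable_size_array_t::error_return_value, e.g.
//   checkErrorReturn(program_result, cudaMalloc(&ptr, bytes));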
variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result)
{
array_command_2 * x = new array_command_2();
union_t _ssa_var_old_old_data_3;
array_command_4 * _ssa_var_y_6;
union_t _ssa_var_old_data_5;
union_t _ssa_var_old_old_data_4;
int r;
union_t _ssa_var_old_data_2;
union_t _ssa_var_y_1;
{
_ssa_var_y_1 = union_t(10, union_v_t::from_pointer((void *) new array_command_3(NULL, ({
// [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]
array_command_2 * cmd = x;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_198;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_198, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_198);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_197<<<39063, 256>>>(dev_env, 10000000, _kernel_result_198);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_198;
}
variable_size_array_t((void *) cmd->result, 10000000);
}))));
_ssa_var_old_data_2 = union_t(19, union_v_t::from_pointer((void *) x));
_ssa_var_old_old_data_3 = union_t(19, union_v_t::from_pointer((void *) x));
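        // 200 iterations: each step wraps the previous result in a stencil command and,
        // once r > 1, frees the device buffer that is now two generations old.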
for (r = 0; r <= (200 - 1); r++)
{
_ssa_var_old_old_data_4 = _ssa_var_old_data_2;
_ssa_var_old_data_5 = _ssa_var_y_1;
_ssa_var_y_6 = new array_command_4(NULL, new array_command_3(NULL, ({
variable_size_array_t _polytemp_result_119;
{
union_t _polytemp_expr_120 = _ssa_var_y_1;
switch (_polytemp_expr_120.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_119 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_120.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_202;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_202, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_202);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_201<<<39063, 256>>>(dev_env, 10000000, _kernel_result_202, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_202;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_119 = ({
// [Ikra::Symbolic::ArrayStencilCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pstencil([ArrayNode: [[ArrayNode: [<-1>, <0>, <0>, <0>]], [ArrayNode: [<0>, <0>, <0>, <0>]], [ArrayNode: [<1>, <0>, <0>, <0>]], [ArrayNode: [<-1>, <-1>, <0>, <0>]]]]; <37>; [HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_120.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_207;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_207, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_207);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_206<<<39063, 256>>>(dev_env, 10000000, _kernel_result_207, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_205;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_205, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_205);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_204<<<39063, 256>>>(dev_env, 10000000, _kernel_result_205, _kernel_result_207);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_205;
timeStartMeasure();
if (_kernel_result_207 != cmd->result) {
// Don't free memory if it is the result. There is already a similar check in
// program_builder (free all except for last). However, this check is not sufficient in
// case the same array is reused!
checkErrorReturn(program_result, cudaFree(_kernel_result_207));
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
_kernel_result_207),
program_result->device_allocations->end());
}
timeReportMeasure(program_result, free_memory);
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_119;
})));
if (((r > 1)))
{
({
bool _polytemp_result_139;
{
union_t _polytemp_expr_140 = _ssa_var_old_old_data_4;
switch (_polytemp_expr_140.class_id)
{
case 19: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_2 * cmd_to_free = (array_command_2 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, cudaFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_3 * cmd_to_free = (array_command_3 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, cudaFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_139 = ({
array_command_4 * cmd_to_free = (array_command_4 *) _polytemp_expr_140.value.pointer;
timeStartMeasure();
bool freed_memory = false;
if (cmd_to_free->result != 0) {
checkErrorReturn(program_result, cudaFree(cmd_to_free->result));;
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
cmd_to_free->result),
program_result->device_allocations->end());
freed_memory = true;
}
timeReportMeasure(program_result, free_memory);
freed_memory;
}); break;
}
}
_polytemp_result_139;
});
}
else
{
}
_ssa_var_y_1 = union_t(20, union_v_t::from_pointer((void *) _ssa_var_y_6));
_ssa_var_old_data_2 = _ssa_var_old_data_5;
_ssa_var_old_old_data_3 = _ssa_var_old_old_data_4;
}
r--;
return ({
variable_size_array_t _polytemp_result_141;
{
union_t _polytemp_expr_142 = _ssa_var_y_1;
switch (_polytemp_expr_142.class_id)
{
case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_141 = ({
// [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000]
array_command_3 * cmd = (array_command_3 *) _polytemp_expr_142.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_238;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_238, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_238);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_237<<<39063, 256>>>(dev_env, 10000000, _kernel_result_238, ((int *) cmd->input_0.content));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_238;
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
case 20: /* [Ikra::Symbolic::ArrayStencilCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_141 = ({
// [Ikra::Symbolic::ArrayStencilCommand, size = 10000000]: [SendNode: [SendNode: [SendNode: [LVarReadNode: _ssa_var_y_1].__call__()].to_command()].pstencil([ArrayNode: [[ArrayNode: [<-1>, <0>, <0>, <0>]], [ArrayNode: [<0>, <0>, <0>, <0>]], [ArrayNode: [<1>, <0>, <0>, <0>]], [ArrayNode: [<-1>, <-1>, <0>, <0>]]]]; <37>; [HashNode: {<:with_index> => [BeginNode: {<true>}]}])]
array_command_4 * cmd = (array_command_4 *) _polytemp_expr_142.value.pointer;
if (cmd->result == 0) {
timeStartMeasure();
int * _kernel_result_243;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_243, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_243);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_242<<<39063, 256>>>(dev_env, 10000000, _kernel_result_243, ((int *) ((int *) cmd->input_0->input_0.content)));
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel); timeStartMeasure();
int * _kernel_result_241;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_241, (sizeof(int) * 10000000)));
program_result->device_allocations->push_back(_kernel_result_241);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_240<<<39063, 256>>>(dev_env, 10000000, _kernel_result_241, _kernel_result_243);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaThreadSynchronize());
timeReportMeasure(program_result, kernel);
cmd->result = _kernel_result_241;
timeStartMeasure();
if (_kernel_result_243 != cmd->result) {
// Don't free memory if it is the result. There is already a similar check in
// program_builder (free all except for last). However, this check is not sufficient in
// case the same array is reused!
checkErrorReturn(program_result, cudaFree(_kernel_result_243));
// Remove from list of allocations
program_result->device_allocations->erase(
std::remove(
program_result->device_allocations->begin(),
program_result->device_allocations->end(),
_kernel_result_243),
program_result->device_allocations->end());
}
timeReportMeasure(program_result, free_memory);
}
variable_size_array_t((void *) cmd->result, 10000000);
}); break;
}
}
_polytemp_result_141;
});
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
expr
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, cudaFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Copy back memory and set pointer of result */
program_result->result = ({
variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
for (
auto device_ptr = program_result->device_allocations->begin();
device_ptr < program_result->device_allocations->end();
device_ptr++)
{
checkErrorReturn(program_result, cudaFree(*device_ptr));
}
delete program_result->device_allocations;
timeReportMeasure(program_result, free_memory);
return program_result;
}
|
1f2c5deffc7d272abf6e0dfdf81351b3c8010928.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
hipMemcpy(mem, array_GPU, sizeof (double) *size, hipMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing a random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
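// For reference: Box-Muller maps two independent uniforms u, v in (0,1) to a
// standard normal via z = sqrt(-2 * ln(u)) * cos(2 * PI * v), which is what
// randn() above computes, with randu() supplying the uniform samples.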
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** Added this function; it was missing in the original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//this doesn't account for the last block that isn't full
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
memset(newMatrix, 0, IszY * IszX * Nfr * sizeof(unsigned char));
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
memset(disk, 0, sizeof(int) * diameter * diameter);
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(hipMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
check_error(hipMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice));
check_error(hipMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
check_error(hipMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(hipMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(hipMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check argument delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 1f2c5deffc7d272abf6e0dfdf81351b3c8010928.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
cudaMemcpy(mem, array_GPU, sizeof (double) *size, cudaMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing a random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
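// For reference: Box-Muller maps two independent uniforms u, v in (0,1) to a
// standard normal via z = sqrt(-2 * ln(u)) * cos(2 * PI * v), which is what
// randn() above computes, with randu() supplying the uniform samples.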
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** Added this function; it was missing in the original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//this doesn't account for the last block that isn't full
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
memset(newMatrix, 0, IszY * IszX * Nfr * sizeof(unsigned char));
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
memset(disk, 0, sizeof(int) * diameter * diameter);
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(cudaMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
cudaThreadSynchronize();
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
check_error(cudaMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(cudaMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(cudaMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args deliminators
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
    //converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
    //converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
    //converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
0c962fcaddccf1952067640a1489182daf73b2bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_helper.cuh"
#include "../image_exception.h"
namespace Cuda
{
bool isCudaSupported()
{
int deviceCount = 0;
if( !cudaSafeCheck( hipGetDeviceCount( &deviceCount ) ) )
return false;
return (deviceCount > 0);
}
void validateKernel()
{
hipError_t error = hipGetLastError();
if( error != hipSuccess )
throw imageException( "Failed to launch CUDA kernel" );
}
void cudaCheck( hipError_t error )
{
if( error != hipSuccess ) {
hipGetLastError();
char errorMessage[64];
sprintf( errorMessage, "Failed to run CUDA function with error %d", error );
throw imageException( errorMessage );
}
}
bool cudaSafeCheck( hipError_t error )
{
        const bool success = (error == hipSuccess);
        if( !success )
            hipGetLastError();
        return success;
}
KernelParameters::KernelParameters( dim3 threadsPerBlock_, dim3 blocksPerGrid_ )
: threadsPerBlock( threadsPerBlock_ )
, blocksPerGrid ( blocksPerGrid_ )
{
}
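    // 1-D launch configuration: a fixed 256-thread block and enough blocks to cover `size`
    // elements (the "+ threadsPerBlock - 1" rounds up), so a kernel only needs a bounds
    // check against `size`.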
KernelParameters getKernelParameters( uint32_t size )
{
static const uint32_t threadsPerBlock = 256;
return KernelParameters( threadsPerBlock, (size + threadsPerBlock - 1) / threadsPerBlock );
}
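    // 2-D launch configuration: start from a square 16x16 block and repeatedly double it
    // along the longer image dimension (halving the other), so the block's aspect ratio
    // roughly follows the image while the thread count stays at 256. Worked example with
    // illustrative numbers: for a 256x64 image the loop runs twice, giving a 64x4 block
    // and a (4, 16) grid.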
KernelParameters getKernelParameters( uint32_t width, uint32_t height )
{
dim3 threadsPerBlock( 16, 16 );
if( width > height ) {
uint32_t increasedHeight = height * 2;
while( (threadsPerBlock.y > 1) && (width >= increasedHeight) ) {
increasedHeight <<= 1;
threadsPerBlock.x <<= 1;
threadsPerBlock.y >>= 1;
}
}
else if( width < height ) {
uint32_t increasedWidth = width * 2;
while( (threadsPerBlock.x > 1) && (height >= increasedWidth) ) {
increasedWidth <<= 1;
threadsPerBlock.x >>= 1;
threadsPerBlock.y <<= 1;
}
}
const dim3 blocksPerGrid( (width + threadsPerBlock.x - 1) / threadsPerBlock.x,
(height + threadsPerBlock.y - 1) / threadsPerBlock.y );
return KernelParameters( threadsPerBlock, blocksPerGrid );
}
hipStream_t getCudaStream()
{
return 0;
}
}
| 0c962fcaddccf1952067640a1489182daf73b2bc.cu | #include "cuda_helper.cuh"
#include "../image_exception.h"
namespace Cuda
{
bool isCudaSupported()
{
int deviceCount = 0;
if( !cudaSafeCheck( cudaGetDeviceCount( &deviceCount ) ) )
return false;
return (deviceCount > 0);
}
void validateKernel()
{
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess )
throw imageException( "Failed to launch CUDA kernel" );
}
void cudaCheck( cudaError error )
{
if( error != cudaSuccess ) {
cudaGetLastError();
char errorMessage[64];
sprintf( errorMessage, "Failed to run CUDA function with error %d", error );
throw imageException( errorMessage );
}
}
bool cudaSafeCheck( cudaError error )
{
        const bool success = (error == cudaSuccess);
        if( !success )
            cudaGetLastError();
        return success;
}
KernelParameters::KernelParameters( dim3 threadsPerBlock_, dim3 blocksPerGrid_ )
: threadsPerBlock( threadsPerBlock_ )
, blocksPerGrid ( blocksPerGrid_ )
{
}
KernelParameters getKernelParameters( uint32_t size )
{
static const uint32_t threadsPerBlock = 256;
return KernelParameters( threadsPerBlock, (size + threadsPerBlock - 1) / threadsPerBlock );
}
KernelParameters getKernelParameters( uint32_t width, uint32_t height )
{
dim3 threadsPerBlock( 16, 16 );
if( width > height ) {
uint32_t increasedHeight = height * 2;
while( (threadsPerBlock.y > 1) && (width >= increasedHeight) ) {
increasedHeight <<= 1;
threadsPerBlock.x <<= 1;
threadsPerBlock.y >>= 1;
}
}
else if( width < height ) {
uint32_t increasedWidth = width * 2;
while( (threadsPerBlock.x > 1) && (height >= increasedWidth) ) {
increasedWidth <<= 1;
threadsPerBlock.x >>= 1;
threadsPerBlock.y <<= 1;
}
}
const dim3 blocksPerGrid( (width + threadsPerBlock.x - 1) / threadsPerBlock.x,
(height + threadsPerBlock.y - 1) / threadsPerBlock.y );
return KernelParameters( threadsPerBlock, blocksPerGrid );
}
cudaStream_t getCudaStream()
{
return 0;
}
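    // Typical call site (a sketch only; `someKernel`, `width`, `height` and the argument
    // list are placeholders, not part of this file):
    //   const KernelParameters kp = getKernelParameters( width, height );
    //   someKernel<<<kp.blocksPerGrid, kp.threadsPerBlock, 0, getCudaStream()>>>( /* args */ );
    //   validateKernel();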
}
|
a496bf15fdb6a0fda0223b2d82ab33b47cfab313.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addOneRowPerThread(double* a, double* b, double* c, int n)
{
// Get the row for current thread
int row = (blockIdx.y * blockDim.y + threadIdx.y);
// Make sure we do not go out of bounds
if (row < n)
{
int idx = row * n;
for (int i = 0; i < n; i++)
{
c[idx + i] = a[idx + i] + b[idx + i];
}
}
} | a496bf15fdb6a0fda0223b2d82ab33b47cfab313.cu | #include "includes.h"
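// The kernel below assigns one thread per matrix row (row = blockIdx.y * blockDim.y + threadIdx.y),
// so only the y-dimension of the launch matters. A minimal host-side launch sketch
// (d_a, d_b, d_c and n are assumed device pointers and size, not defined in this file):
//   dim3 block(1, 256);
//   dim3 grid(1, (n + block.y - 1) / block.y);
//   addOneRowPerThread<<<grid, block>>>(d_a, d_b, d_c, n);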
__global__ void addOneRowPerThread(double* a, double* b, double* c, int n)
{
// Get the row for current thread
int row = (blockIdx.y * blockDim.y + threadIdx.y);
// Make sure we do not go out of bounds
if (row < n)
{
int idx = row * n;
for (int i = 0; i < n; i++)
{
c[idx + i] = a[idx + i] + b[idx + i];
}
}
} |
c40e5798d7e80bf083c79086cd7419e6dd931590.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file ppmc_cuda.cu
 * \brief Function definitions for the ppm kernels, using characteristic
tracing. Written following Stone et al. 2008. */
#include <math.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../reconstruction/ppmc_cuda.h"
#include "../reconstruction/reconstruction.h"
#include "../utils/gpu.hpp"
#include "../utils/hydro_utilities.h"
#ifdef DE // PRESSURE_DE
#include "../utils/hydro_utilities.h"
#endif
// =====================================================================================================================
/*!
* \brief When passed a stencil of conserved variables, returns the left and
right boundary values for the interface calculated using ppm. */
__global__ void PPMC_CTU(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, Real dx,
Real dt, Real gamma, int dir)
{
// get a thread ID
int const thread_id = threadIdx.x + blockIdx.x * blockDim.x;
int xid, yid, zid;
cuda_utilities::compute3DIndices(thread_id, nx, ny, xid, yid, zid);
if (reconstruction::Thread_Guard<3>(nx, ny, nz, xid, yid, zid)) {
return;
}
// Compute the total number of cells
int const n_cells = nx * ny * nz;
// Set the field indices for the various directions
int o1, o2, o3;
switch (dir) {
case 0:
o1 = grid_enum::momentum_x;
o2 = grid_enum::momentum_y;
o3 = grid_enum::momentum_z;
break;
case 1:
o1 = grid_enum::momentum_y;
o2 = grid_enum::momentum_z;
o3 = grid_enum::momentum_x;
break;
case 2:
o1 = grid_enum::momentum_z;
o2 = grid_enum::momentum_x;
o3 = grid_enum::momentum_y;
break;
}
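  // o1 is the momentum component along the sweep direction (normal to the interfaces being
  // reconstructed), while o2 and o3 are the two transverse components; the cyclic permutation
  // lets the same 1-D reconstruction code serve all three directions.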
// load the 5-cell stencil into registers
// cell i
reconstruction::Primitive const cell_i =
reconstruction::Load_Data(dev_conserved, xid, yid, zid, nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i-1. The equality checks select the active direction and subtract one from it
// im1 stands for "i minus 1"
reconstruction::Primitive const cell_im1 = reconstruction::Load_Data(
dev_conserved, xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+1. The equality checks select the active direction and add one to it
// ip1 stands for "i plus 1"
reconstruction::Primitive const cell_ip1 = reconstruction::Load_Data(
dev_conserved, xid + int(dir == 0), yid + int(dir == 1), zid + int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i-2. The equality checks select the active direction and subtract two from it
// im2 stands for "i minus 2"
reconstruction::Primitive const cell_im2 =
reconstruction::Load_Data(dev_conserved, xid - 2 * int(dir == 0), yid - 2 * int(dir == 1),
zid - 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+2. The equality checks select the active direction and add two to it
// ip2 stands for "i plus 2"
reconstruction::Primitive const cell_ip2 =
reconstruction::Load_Data(dev_conserved, xid + 2 * int(dir == 0), yid + 2 * int(dir == 1),
zid + 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
// ===============
// Cell i-1 slopes
// ===============
// calculate the adiabatic sound speed in cell im1
Real sound_speed = hydro_utilities::Calc_Sound_Speed(cell_im1.pressure, cell_im1.density, gamma);
  // this eigenvector struct isn't actually used here and the compiler should optimize it away, but since this is the
  // only reconstruction function that doesn't use it, it was easier to add it here as an unused variable
reconstruction::EigenVecs eigenvector;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
reconstruction::Primitive del_L = reconstruction::Compute_Slope(cell_im2, cell_im1);
// right
reconstruction::Primitive del_R = reconstruction::Compute_Slope(cell_im1, cell_i);
// centered
reconstruction::Primitive del_C = reconstruction::Compute_Slope(cell_im2, cell_i, 0.5);
// Van Leer
reconstruction::Primitive del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
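  // The van Leer slope is a limited, harmonic-mean style average of the one-sided slopes;
  // in its usual form (assumed here for Van_Leer_Slope) it equals
  //   2 * del_L * del_R / (del_L + del_R) when del_L * del_R > 0, and 0 otherwise,
  // so no new extrema are introduced.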
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
reconstruction::Characteristic del_a_L = reconstruction::Primitive_To_Characteristic(
cell_im1, del_L, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_R = reconstruction::Primitive_To_Characteristic(
cell_im1, del_R, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_C = reconstruction::Primitive_To_Characteristic(
cell_im1, del_C, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_G = reconstruction::Primitive_To_Characteristic(
cell_im1, del_G, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive const del_m_im1 = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_im1, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// =============
// Cell i slopes
// =============
// calculate the adiabatic sound speed in cell i
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
del_L = reconstruction::Compute_Slope(cell_im1, cell_i);
// right
del_R = reconstruction::Compute_Slope(cell_i, cell_ip1);
// centered
del_C = reconstruction::Compute_Slope(cell_im1, cell_ip1, 0.5);
// Van Leer
del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
del_a_L = reconstruction::Primitive_To_Characteristic(cell_i, del_L, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_R = reconstruction::Primitive_To_Characteristic(cell_i, del_R, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_C = reconstruction::Primitive_To_Characteristic(cell_i, del_C, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_G = reconstruction::Primitive_To_Characteristic(cell_i, del_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive del_m_i = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_i, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// ===============
// Cell i+1 slopes
// ===============
  // calculate the adiabatic sound speed in cell ip1
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_ip1.pressure, cell_ip1.density, gamma);
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
del_L = reconstruction::Compute_Slope(cell_i, cell_ip1);
// right
del_R = reconstruction::Compute_Slope(cell_ip1, cell_ip2);
// centered
del_C = reconstruction::Compute_Slope(cell_i, cell_ip2, 0.5);
// Van Leer
del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
del_a_L = reconstruction::Primitive_To_Characteristic(cell_ip1, del_L, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_R = reconstruction::Primitive_To_Characteristic(cell_ip1, del_R, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_C = reconstruction::Primitive_To_Characteristic(cell_ip1, del_C, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_G = reconstruction::Primitive_To_Characteristic(cell_ip1, del_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive const del_m_ip1 = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_ip1, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
  // Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center. Here, the
// subscripts L and R refer to the left and right side of the ith cell center Stone Eqn 46
reconstruction::Primitive interface_L_iph =
reconstruction::Calc_Interface_Parabolic(cell_ip1, cell_i, del_m_ip1, del_m_i);
reconstruction::Primitive interface_R_imh =
reconstruction::Calc_Interface_Parabolic(cell_i, cell_im1, del_m_i, del_m_im1);
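  // Assuming the standard PPM form for Calc_Interface_Parabolic, the value at the interface
  // between cells j and j+1 is the fourth-order estimate
  //   a_{j+1/2} = (a_j + a_{j+1}) / 2 - (del_m a_{j+1} - del_m a_j) / 6,
  // evaluated at i+1/2 for interface_L_iph and at i-1/2 for interface_R_imh.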
  // Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side of the cell center lie
// between neighboring cell-centered values Stone Eqns 47 - 53
reconstruction::Monotonize_Parabolic_Interface(cell_i, cell_im1, cell_ip1, interface_L_iph, interface_R_imh);
// This is the beginning of the characteristic tracing
// Step 8 - Compute the coefficients for the monotonized parabolic
// interpolation function
// Stone Eqn 54
del_m_i.density = interface_L_iph.density - interface_R_imh.density;
del_m_i.velocity_x = interface_L_iph.velocity_x - interface_R_imh.velocity_x;
del_m_i.velocity_y = interface_L_iph.velocity_y - interface_R_imh.velocity_y;
del_m_i.velocity_z = interface_L_iph.velocity_z - interface_R_imh.velocity_z;
del_m_i.pressure = interface_L_iph.pressure - interface_R_imh.pressure;
Real const d_6 = 6.0 * (cell_i.density - 0.5 * (interface_R_imh.density + interface_L_iph.density));
Real const vx_6 = 6.0 * (cell_i.velocity_x - 0.5 * (interface_R_imh.velocity_x + interface_L_iph.velocity_x));
Real const vy_6 = 6.0 * (cell_i.velocity_y - 0.5 * (interface_R_imh.velocity_y + interface_L_iph.velocity_y));
Real const vz_6 = 6.0 * (cell_i.velocity_z - 0.5 * (interface_R_imh.velocity_z + interface_L_iph.velocity_z));
Real const p_6 = 6.0 * (cell_i.pressure - 0.5 * (interface_R_imh.pressure + interface_L_iph.pressure));
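  // del_m_i and the *_6 terms are the coefficients of the PPM parabola inside the cell:
  // with xi in [0, 1] spanning the cell,
  //   a(xi) = a_{R,imh} + xi * (del_m a + a_6 * (1 - xi)),
  // where del_m a = a_{L,iph} - a_{R,imh} and a_6 = 6 * (a_i - (a_{L,iph} + a_{R,imh}) / 2).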
#ifdef DE
del_m_i.gas_energy = interface_L_iph.gas_energy - interface_R_imh.gas_energy;
Real const ge_6 = 6.0 * (cell_i.gas_energy - 0.5 * (interface_R_imh.gas_energy + interface_L_iph.gas_energy));
#endif // DE
#ifdef SCALAR
Real scalar_6[NSCALARS];
for (int i = 0; i < NSCALARS; i++) {
del_m_i.scalar[i] = interface_L_iph.scalar[i] - interface_R_imh.scalar[i];
scalar_6[i] = 6.0 * (cell_i.scalar[i] - 0.5 * (interface_R_imh.scalar[i] + interface_L_iph.scalar[i]));
}
#endif // SCALAR
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
Real const lambda_m = cell_i.velocity_x - sound_speed;
Real const lambda_0 = cell_i.velocity_x;
Real const lambda_p = cell_i.velocity_x + sound_speed;
// Step 9 - Compute the left and right interface values using monotonized
// parabolic interpolation
// Stone Eqns 55 & 56
// largest eigenvalue
Real const lambda_max = fmax(lambda_p, (Real)0);
// smallest eigenvalue
Real const lambda_min = fmin(lambda_m, (Real)0);
// left interface value, i+1/2
Real const dtodx = dt / dx;
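  // The updates below average this parabola over the domain of dependence swept during dt:
  // the i+1/2 state integrates over the slab of width lambda_max * dt next to the right
  // face, and the i-1/2 state over the slab of width |lambda_min| * dt next to the left face.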
interface_L_iph.density =
interface_L_iph.density -
lambda_max * (0.5 * dtodx) * (del_m_i.density - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * d_6);
interface_L_iph.velocity_x =
interface_L_iph.velocity_x -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_x - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vx_6);
interface_L_iph.velocity_y =
interface_L_iph.velocity_y -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_y - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vy_6);
interface_L_iph.velocity_z =
interface_L_iph.velocity_z -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_z - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vz_6);
interface_L_iph.pressure =
interface_L_iph.pressure -
lambda_max * (0.5 * dtodx) * (del_m_i.pressure - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * p_6);
// right interface value, i-1/2
interface_R_imh.density =
interface_R_imh.density -
lambda_min * (0.5 * dtodx) * (del_m_i.density + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * d_6);
interface_R_imh.velocity_x =
interface_R_imh.velocity_x -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_x + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vx_6);
interface_R_imh.velocity_y =
interface_R_imh.velocity_y -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_y + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vy_6);
interface_R_imh.velocity_z =
interface_R_imh.velocity_z -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_z + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vz_6);
interface_R_imh.pressure =
interface_R_imh.pressure -
lambda_min * (0.5 * dtodx) * (del_m_i.pressure + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * p_6);
#ifdef DE
interface_L_iph.gas_energy =
interface_L_iph.gas_energy -
lambda_max * (0.5 * dtodx) * (del_m_i.gas_energy - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * ge_6);
interface_R_imh.gas_energy =
interface_R_imh.gas_energy -
lambda_min * (0.5 * dtodx) * (del_m_i.gas_energy + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * ge_6);
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_L_iph.scalar[i] =
interface_L_iph.scalar[i] -
lambda_max * (0.5 * dtodx) * (del_m_i.scalar[i] - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * scalar_6[i]);
interface_R_imh.scalar[i] =
interface_R_imh.scalar[i] -
lambda_min * (0.5 * dtodx) * (del_m_i.scalar[i] + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * scalar_6[i]);
}
#endif // SCALAR
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
// left-hand interface value, i+1/2
Real sum_1 = 0, sum_2 = 0, sum_3 = 0, sum_4 = 0, sum_5 = 0;
#ifdef DE
Real sum_ge = 0;
Real chi_ge = 0;
#endif // DE
#ifdef SCALAR
Real chi_scalar[NSCALARS];
Real sum_scalar[NSCALARS];
for (Real &val : sum_scalar) {
val = 0;
}
#endif // SCALAR
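  // A wave family contributes a correction only if it propagates toward this face
  // (lambda >= 0 for the right-hand i+1/2 interface). Each chi_* term is the difference
  // between the fastest-wave average computed above and the average over that wave's own
  // domain of dependence; the sums project these differences onto the primitive variables
  // through the acoustic factors (density, sound speed).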
if (lambda_m >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_m);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_m * lambda_m);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
sum_1 += -0.5 * (cell_i.density * chi_2 / sound_speed - chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 - chi_5 / (sound_speed * cell_i.density));
sum_5 += -0.5 * (cell_i.density * chi_2 * sound_speed - chi_5);
}
if (lambda_0 >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_0);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_0 * lambda_0);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
#ifdef DE
chi_ge = A * (del_m_i.gas_energy - ge_6) + B * ge_6;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
chi_scalar[i] = A * (del_m_i.scalar[i] - scalar_6[i]) + B * scalar_6[i];
}
#endif // SCALAR
sum_1 += chi_1 - chi_5 / (sound_speed * sound_speed);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif // SCALAR
}
if (lambda_p >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_p);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_p * lambda_p);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
sum_1 += 0.5 * (cell_i.density * chi_2 / sound_speed + chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 + chi_5 / (sound_speed * cell_i.density));
sum_5 += 0.5 * (cell_i.density * chi_2 * sound_speed + chi_5);
}
// add the corrections to the initial guesses for the interface values
interface_L_iph.density += sum_1;
interface_L_iph.velocity_x += sum_2;
interface_L_iph.velocity_y += sum_3;
interface_L_iph.velocity_z += sum_4;
interface_L_iph.pressure += sum_5;
#ifdef DE
interface_L_iph.gas_energy += sum_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_L_iph.scalar[i] += sum_scalar[i];
}
#endif // SCALAR
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif // DE
#ifdef SCALAR
for (Real &val : sum_scalar) {
val = 0;
}
#endif // SCALAR
if (lambda_m <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_m);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_m * lambda_m);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
sum_1 += -0.5 * (cell_i.density * chi_2 / sound_speed - chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 - chi_5 / (sound_speed * cell_i.density));
sum_5 += -0.5 * (cell_i.density * chi_2 * sound_speed - chi_5);
}
if (lambda_0 <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_0);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_0 * lambda_0);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
#ifdef DE
chi_ge = C * (del_m_i.gas_energy + ge_6) + D * ge_6;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
chi_scalar[i] = C * (del_m_i.scalar[i] + scalar_6[i]) + D * scalar_6[i];
}
#endif // SCALAR
sum_1 += chi_1 - chi_5 / (sound_speed * sound_speed);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif // SCALAR
}
if (lambda_p <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_p);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_p * lambda_p);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
sum_1 += 0.5 * (cell_i.density * chi_2 / sound_speed + chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 + chi_5 / (sound_speed * cell_i.density));
sum_5 += 0.5 * (cell_i.density * chi_2 * sound_speed + chi_5);
}
// add the corrections
interface_R_imh.density += sum_1;
interface_R_imh.velocity_x += sum_2;
interface_R_imh.velocity_y += sum_3;
interface_R_imh.velocity_z += sum_4;
interface_R_imh.pressure += sum_5;
#ifdef DE
interface_R_imh.gas_energy += sum_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_R_imh.scalar[i] += sum_scalar[i];
}
#endif // SCALAR
// This is the end of the characteristic tracing
// enforce minimum values
interface_R_imh.density = fmax(interface_R_imh.density, (Real)TINY_NUMBER);
interface_L_iph.density = fmax(interface_L_iph.density, (Real)TINY_NUMBER);
interface_R_imh.pressure = fmax(interface_R_imh.pressure, (Real)TINY_NUMBER);
interface_L_iph.pressure = fmax(interface_L_iph.pressure, (Real)TINY_NUMBER);
// Step 11 - Send final values back from kernel
  // Convert the left and right states from primitive to conserved variables and send the final values back from the kernel
// bounds_R refers to the right side of the i-1/2 interface
size_t id = cuda_utilities::compute1DIndex(xid, yid, zid, nx, ny);
reconstruction::Write_Data(interface_L_iph, dev_bounds_L, dev_conserved, id, n_cells, o1, o2, o3, gamma);
id = cuda_utilities::compute1DIndex(xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny);
reconstruction::Write_Data(interface_R_imh, dev_bounds_R, dev_conserved, id, n_cells, o1, o2, o3, gamma);
}
// =====================================================================================================================
// =====================================================================================================================
__global__ __launch_bounds__(TPB) void PPMC_VL(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx,
int ny, int nz, Real gamma, int dir)
{
// get a thread ID
int const thread_id = threadIdx.x + blockIdx.x * blockDim.x;
int xid, yid, zid;
cuda_utilities::compute3DIndices(thread_id, nx, ny, xid, yid, zid);
// Ensure that we are only operating on cells that will be used
if (reconstruction::Thread_Guard<3>(nx, ny, nz, xid, yid, zid)) {
return;
}
// Compute the total number of cells
int const n_cells = nx * ny * nz;
// Set the field indices for the various directions
int o1, o2, o3;
switch (dir) {
case 0:
o1 = grid_enum::momentum_x;
o2 = grid_enum::momentum_y;
o3 = grid_enum::momentum_z;
break;
case 1:
o1 = grid_enum::momentum_y;
o2 = grid_enum::momentum_z;
o3 = grid_enum::momentum_x;
break;
case 2:
o1 = grid_enum::momentum_z;
o2 = grid_enum::momentum_x;
o3 = grid_enum::momentum_y;
break;
}
// load the 5-cell stencil into registers
// cell i
reconstruction::Primitive const cell_i =
reconstruction::Load_Data(dev_conserved, xid, yid, zid, nx, ny, n_cells, o1, o2, o3, gamma);
// cell i-1. The equality checks the direction and will subtract one from the correct direction
// im1 stands for "i minus 1"
reconstruction::Primitive const cell_im1 = reconstruction::Load_Data(
dev_conserved, xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+1. The equality checks the direction and adds one to the correct direction
// ip1 stands for "i plus 1"
reconstruction::Primitive const cell_ip1 = reconstruction::Load_Data(
dev_conserved, xid + int(dir == 0), yid + int(dir == 1), zid + int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// cell i-2. The equality checks the direction and will subtract two from the correct direction
// im2 stands for "i minus 2"
reconstruction::Primitive const cell_im2 =
reconstruction::Load_Data(dev_conserved, xid - 2 * int(dir == 0), yid - 2 * int(dir == 1),
zid - 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+2. The equality checks the direction and adds two to the correct direction
// ip2 stands for "i plus 2"
reconstruction::Primitive const cell_ip2 =
reconstruction::Load_Data(dev_conserved, xid + 2 * int(dir == 0), yid + 2 * int(dir == 1),
zid + 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// Convert to the characteristic variables
Real const sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
Real const sound_speed_squared = sound_speed * sound_speed;
#ifdef MHD
reconstruction::EigenVecs eigenvectors =
reconstruction::Compute_Eigenvectors(cell_i, sound_speed, sound_speed_squared, gamma);
#else
reconstruction::EigenVecs eigenvectors;
#endif // MHD
// Cell i
reconstruction::Characteristic const cell_i_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_i, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i-1
reconstruction::Characteristic const cell_im1_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_im1, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i-2
reconstruction::Characteristic const cell_im2_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_im2, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i+1
reconstruction::Characteristic const cell_ip1_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_ip1, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i+2
reconstruction::Characteristic const cell_ip2_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_ip2, eigenvectors, sound_speed, sound_speed_squared, gamma);
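  // In this Van Leer (predictor-corrector) variant the whole 5-cell stencil is projected onto
  // the characteristic fields of the central cell i, the PPM limiting below is applied to each
  // characteristic variable independently, and no time-dependent characteristic tracing is
  // performed (note the kernel takes no dt or dx argument).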
// Compute the interface states for each field
reconstruction::Characteristic interface_R_imh_characteristic, interface_L_iph_characteristic;
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a0, cell_im1_characteristic.a0, cell_i_characteristic.a0,
cell_ip1_characteristic.a0, cell_ip2_characteristic.a0,
interface_L_iph_characteristic.a0, interface_R_imh_characteristic.a0);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a1, cell_im1_characteristic.a1, cell_i_characteristic.a1,
cell_ip1_characteristic.a1, cell_ip2_characteristic.a1,
interface_L_iph_characteristic.a1, interface_R_imh_characteristic.a1);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a2, cell_im1_characteristic.a2, cell_i_characteristic.a2,
cell_ip1_characteristic.a2, cell_ip2_characteristic.a2,
interface_L_iph_characteristic.a2, interface_R_imh_characteristic.a2);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a3, cell_im1_characteristic.a3, cell_i_characteristic.a3,
cell_ip1_characteristic.a3, cell_ip2_characteristic.a3,
interface_L_iph_characteristic.a3, interface_R_imh_characteristic.a3);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a4, cell_im1_characteristic.a4, cell_i_characteristic.a4,
cell_ip1_characteristic.a4, cell_ip2_characteristic.a4,
interface_L_iph_characteristic.a4, interface_R_imh_characteristic.a4);
#ifdef MHD
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a5, cell_im1_characteristic.a5, cell_i_characteristic.a5,
cell_ip1_characteristic.a5, cell_ip2_characteristic.a5,
interface_L_iph_characteristic.a5, interface_R_imh_characteristic.a5);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a6, cell_im1_characteristic.a6, cell_i_characteristic.a6,
cell_ip1_characteristic.a6, cell_ip2_characteristic.a6,
interface_L_iph_characteristic.a6, interface_R_imh_characteristic.a6);
#endif // MHD
// Convert back to primitive variables
reconstruction::Primitive interface_L_iph = reconstruction::Characteristic_To_Primitive(
cell_i, interface_L_iph_characteristic, eigenvectors, sound_speed, sound_speed_squared, gamma);
reconstruction::Primitive interface_R_imh = reconstruction::Characteristic_To_Primitive(
cell_i, interface_R_imh_characteristic, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Compute the interfaces for the variables that don't have characteristics
#ifdef DE
reconstruction::PPM_Single_Variable(cell_im2.gas_energy, cell_im1.gas_energy, cell_i.gas_energy, cell_ip1.gas_energy,
cell_ip2.gas_energy, interface_L_iph.gas_energy, interface_R_imh.gas_energy);
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
reconstruction::PPM_Single_Variable(cell_im2.scalar[i], cell_im1.scalar[i], cell_i.scalar[i], cell_ip1.scalar[i],
cell_ip2.scalar[i], interface_L_iph.scalar[i], interface_R_imh.scalar[i]);
}
#endif // SCALAR
// enforce minimum values
interface_R_imh.density = fmax(interface_R_imh.density, (Real)TINY_NUMBER);
interface_L_iph.density = fmax(interface_L_iph.density, (Real)TINY_NUMBER);
interface_R_imh.pressure = fmax(interface_R_imh.pressure, (Real)TINY_NUMBER);
interface_L_iph.pressure = fmax(interface_L_iph.pressure, (Real)TINY_NUMBER);
// Step 11 - Send final values back from kernel
  // Convert the left and right states from primitive to conserved variables and send the final values back from the kernel
// bounds_R refers to the right side of the i-1/2 interface
size_t id = cuda_utilities::compute1DIndex(xid, yid, zid, nx, ny);
reconstruction::Write_Data(interface_L_iph, dev_bounds_L, dev_conserved, id, n_cells, o1, o2, o3, gamma);
id = cuda_utilities::compute1DIndex(xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny);
reconstruction::Write_Data(interface_R_imh, dev_bounds_R, dev_conserved, id, n_cells, o1, o2, o3, gamma);
}
// =====================================================================================================================
| c40e5798d7e80bf083c79086cd7419e6dd931590.cu | /*! \file ppmc_cuda.cu
 * \brief Function definitions for the ppm kernels, using characteristic
tracing. Written following Stone et al. 2008. */
#include <math.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../reconstruction/ppmc_cuda.h"
#include "../reconstruction/reconstruction.h"
#include "../utils/gpu.hpp"
#include "../utils/hydro_utilities.h"
#ifdef DE // PRESSURE_DE
#include "../utils/hydro_utilities.h"
#endif
// =====================================================================================================================
/*!
* \brief When passed a stencil of conserved variables, returns the left and
right boundary values for the interface calculated using ppm. */
__global__ void PPMC_CTU(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, Real dx,
Real dt, Real gamma, int dir)
{
// get a thread ID
int const thread_id = threadIdx.x + blockIdx.x * blockDim.x;
int xid, yid, zid;
cuda_utilities::compute3DIndices(thread_id, nx, ny, xid, yid, zid);
if (reconstruction::Thread_Guard<3>(nx, ny, nz, xid, yid, zid)) {
return;
}
// Compute the total number of cells
int const n_cells = nx * ny * nz;
// Set the field indices for the various directions
int o1, o2, o3;
switch (dir) {
case 0:
o1 = grid_enum::momentum_x;
o2 = grid_enum::momentum_y;
o3 = grid_enum::momentum_z;
break;
case 1:
o1 = grid_enum::momentum_y;
o2 = grid_enum::momentum_z;
o3 = grid_enum::momentum_x;
break;
case 2:
o1 = grid_enum::momentum_z;
o2 = grid_enum::momentum_x;
o3 = grid_enum::momentum_y;
break;
}
// load the 5-cell stencil into registers
// cell i
reconstruction::Primitive const cell_i =
reconstruction::Load_Data(dev_conserved, xid, yid, zid, nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i-1. The equality checks select the active direction and subtract one from it
// im1 stands for "i minus 1"
reconstruction::Primitive const cell_im1 = reconstruction::Load_Data(
dev_conserved, xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+1. The equality checks select the active direction and add one to it
// ip1 stands for "i plus 1"
reconstruction::Primitive const cell_ip1 = reconstruction::Load_Data(
dev_conserved, xid + int(dir == 0), yid + int(dir == 1), zid + int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i-2. The equality checks select the active direction and subtract two from it
// im2 stands for "i minus 2"
reconstruction::Primitive const cell_im2 =
reconstruction::Load_Data(dev_conserved, xid - 2 * int(dir == 0), yid - 2 * int(dir == 1),
zid - 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
  // cell i+2. The equality checks select the active direction and add two to it
// ip2 stands for "i plus 2"
reconstruction::Primitive const cell_ip2 =
reconstruction::Load_Data(dev_conserved, xid + 2 * int(dir == 0), yid + 2 * int(dir == 1),
zid + 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
// ===============
// Cell i-1 slopes
// ===============
// calculate the adiabatic sound speed in cell im1
Real sound_speed = hydro_utilities::Calc_Sound_Speed(cell_im1.pressure, cell_im1.density, gamma);
  // this eigenvector struct isn't actually used here and the compiler should optimize it away, but since this is the
  // only reconstruction function that doesn't use it, it was easier to add it here as an unused variable
reconstruction::EigenVecs eigenvector;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
reconstruction::Primitive del_L = reconstruction::Compute_Slope(cell_im2, cell_im1);
// right
reconstruction::Primitive del_R = reconstruction::Compute_Slope(cell_im1, cell_i);
// centered
reconstruction::Primitive del_C = reconstruction::Compute_Slope(cell_im2, cell_i, 0.5);
// Van Leer
reconstruction::Primitive del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
reconstruction::Characteristic del_a_L = reconstruction::Primitive_To_Characteristic(
cell_im1, del_L, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_R = reconstruction::Primitive_To_Characteristic(
cell_im1, del_R, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_C = reconstruction::Primitive_To_Characteristic(
cell_im1, del_C, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
reconstruction::Characteristic del_a_G = reconstruction::Primitive_To_Characteristic(
cell_im1, del_G, eigenvector, sound_speed, sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive const del_m_im1 = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_im1, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// =============
// Cell i slopes
// =============
// calculate the adiabatic sound speed in cell i
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
del_L = reconstruction::Compute_Slope(cell_im1, cell_i);
// right
del_R = reconstruction::Compute_Slope(cell_i, cell_ip1);
// centered
del_C = reconstruction::Compute_Slope(cell_im1, cell_ip1, 0.5);
// Van Leer
del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
del_a_L = reconstruction::Primitive_To_Characteristic(cell_i, del_L, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_R = reconstruction::Primitive_To_Characteristic(cell_i, del_R, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_C = reconstruction::Primitive_To_Characteristic(cell_i, del_C, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_G = reconstruction::Primitive_To_Characteristic(cell_i, del_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive del_m_i = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_i, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// ===============
// Cell i+1 slopes
// ===============
  // calculate the adiabatic sound speed in cell ip1
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_ip1.pressure, cell_ip1.density, gamma);
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables. Note that here L
// and R refer to locations relative to the cell center Stone Eqn 36
// left
del_L = reconstruction::Compute_Slope(cell_i, cell_ip1);
// right
del_R = reconstruction::Compute_Slope(cell_ip1, cell_ip2);
// centered
del_C = reconstruction::Compute_Slope(cell_i, cell_ip2, 0.5);
// Van Leer
del_G = reconstruction::Van_Leer_Slope(del_L, del_R);
// Step 3 - Project the left, right, centered and van Leer differences onto the
// characteristic variables Stone Eqn 37 (del_a are differences in
// characteristic variables, see Stone for notation) Use the eigenvectors
// given in Stone 2008, Appendix A
del_a_L = reconstruction::Primitive_To_Characteristic(cell_ip1, del_L, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_R = reconstruction::Primitive_To_Characteristic(cell_ip1, del_R, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_C = reconstruction::Primitive_To_Characteristic(cell_ip1, del_C, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
del_a_G = reconstruction::Primitive_To_Characteristic(cell_ip1, del_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Step 5 - and project the monotonized difference in the characteristic variables back onto the primitive variables
// Stone Eqn 39
reconstruction::Primitive const del_m_ip1 = reconstruction::Monotonize_Characteristic_Return_Primitive(
cell_ip1, del_L, del_R, del_C, del_G, del_a_L, del_a_R, del_a_C, del_a_G, eigenvector, sound_speed,
sound_speed * sound_speed, gamma);
  // Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center. Here, the
// subscripts L and R refer to the left and right side of the ith cell center Stone Eqn 46
reconstruction::Primitive interface_L_iph =
reconstruction::Calc_Interface_Parabolic(cell_ip1, cell_i, del_m_ip1, del_m_i);
reconstruction::Primitive interface_R_imh =
reconstruction::Calc_Interface_Parabolic(cell_i, cell_im1, del_m_i, del_m_im1);
// Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side of cell center lie
// between neighboring cell-centered values Stone Eqns 47 - 53
reconstruction::Monotonize_Parabolic_Interface(cell_i, cell_im1, cell_ip1, interface_L_iph, interface_R_imh);
// This is the beginning of the characteristic tracing
// Step 8 - Compute the coefficients for the monotonized parabolic
// interpolation function
// Stone Eqn 54
del_m_i.density = interface_L_iph.density - interface_R_imh.density;
del_m_i.velocity_x = interface_L_iph.velocity_x - interface_R_imh.velocity_x;
del_m_i.velocity_y = interface_L_iph.velocity_y - interface_R_imh.velocity_y;
del_m_i.velocity_z = interface_L_iph.velocity_z - interface_R_imh.velocity_z;
del_m_i.pressure = interface_L_iph.pressure - interface_R_imh.pressure;
Real const d_6 = 6.0 * (cell_i.density - 0.5 * (interface_R_imh.density + interface_L_iph.density));
Real const vx_6 = 6.0 * (cell_i.velocity_x - 0.5 * (interface_R_imh.velocity_x + interface_L_iph.velocity_x));
Real const vy_6 = 6.0 * (cell_i.velocity_y - 0.5 * (interface_R_imh.velocity_y + interface_L_iph.velocity_y));
Real const vz_6 = 6.0 * (cell_i.velocity_z - 0.5 * (interface_R_imh.velocity_z + interface_L_iph.velocity_z));
Real const p_6 = 6.0 * (cell_i.pressure - 0.5 * (interface_R_imh.pressure + interface_L_iph.pressure));
#ifdef DE
del_m_i.gas_energy = interface_L_iph.gas_energy - interface_R_imh.gas_energy;
Real const ge_6 = 6.0 * (cell_i.gas_energy - 0.5 * (interface_R_imh.gas_energy + interface_L_iph.gas_energy));
#endif // DE
#ifdef SCALAR
Real scalar_6[NSCALARS];
for (int i = 0; i < NSCALARS; i++) {
del_m_i.scalar[i] = interface_L_iph.scalar[i] - interface_R_imh.scalar[i];
scalar_6[i] = 6.0 * (cell_i.scalar[i] - 0.5 * (interface_R_imh.scalar[i] + interface_L_iph.scalar[i]));
}
#endif // SCALAR
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
Real const lambda_m = cell_i.velocity_x - sound_speed;
Real const lambda_0 = cell_i.velocity_x;
Real const lambda_p = cell_i.velocity_x + sound_speed;
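  // u - c, u, and u + c: the wave speeds (eigenvalues) of the 1-D Euler equations along this
  // sweep direction, used below to bound each interface's domain of dependence over dt.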
// Step 9 - Compute the left and right interface values using monotonized
// parabolic interpolation
// Stone Eqns 55 & 56
// largest eigenvalue
Real const lambda_max = fmax(lambda_p, (Real)0);
// smallest eigenvalue
Real const lambda_min = fmin(lambda_m, (Real)0);
// left interface value, i+1/2
Real const dtodx = dt / dx;
interface_L_iph.density =
interface_L_iph.density -
lambda_max * (0.5 * dtodx) * (del_m_i.density - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * d_6);
interface_L_iph.velocity_x =
interface_L_iph.velocity_x -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_x - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vx_6);
interface_L_iph.velocity_y =
interface_L_iph.velocity_y -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_y - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vy_6);
interface_L_iph.velocity_z =
interface_L_iph.velocity_z -
lambda_max * (0.5 * dtodx) * (del_m_i.velocity_z - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * vz_6);
interface_L_iph.pressure =
interface_L_iph.pressure -
lambda_max * (0.5 * dtodx) * (del_m_i.pressure - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * p_6);
// right interface value, i-1/2
interface_R_imh.density =
interface_R_imh.density -
lambda_min * (0.5 * dtodx) * (del_m_i.density + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * d_6);
interface_R_imh.velocity_x =
interface_R_imh.velocity_x -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_x + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vx_6);
interface_R_imh.velocity_y =
interface_R_imh.velocity_y -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_y + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vy_6);
interface_R_imh.velocity_z =
interface_R_imh.velocity_z -
lambda_min * (0.5 * dtodx) * (del_m_i.velocity_z + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * vz_6);
interface_R_imh.pressure =
interface_R_imh.pressure -
lambda_min * (0.5 * dtodx) * (del_m_i.pressure + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * p_6);
#ifdef DE
interface_L_iph.gas_energy =
interface_L_iph.gas_energy -
lambda_max * (0.5 * dtodx) * (del_m_i.gas_energy - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * ge_6);
interface_R_imh.gas_energy =
interface_R_imh.gas_energy -
lambda_min * (0.5 * dtodx) * (del_m_i.gas_energy + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * ge_6);
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_L_iph.scalar[i] =
interface_L_iph.scalar[i] -
lambda_max * (0.5 * dtodx) * (del_m_i.scalar[i] - (1.0 - (2.0 / 3.0) * lambda_max * dtodx) * scalar_6[i]);
interface_R_imh.scalar[i] =
interface_R_imh.scalar[i] -
lambda_min * (0.5 * dtodx) * (del_m_i.scalar[i] + (1.0 + (2.0 / 3.0) * lambda_min * dtodx) * scalar_6[i]);
}
#endif // SCALAR
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
// left-hand interface value, i+1/2
Real sum_1 = 0, sum_2 = 0, sum_3 = 0, sum_4 = 0, sum_5 = 0;
#ifdef DE
Real sum_ge = 0;
Real chi_ge = 0;
#endif // DE
#ifdef SCALAR
Real chi_scalar[NSCALARS];
Real sum_scalar[NSCALARS];
for (Real &val : sum_scalar) {
val = 0;
}
#endif // SCALAR
if (lambda_m >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_m);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_m * lambda_m);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
sum_1 += -0.5 * (cell_i.density * chi_2 / sound_speed - chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 - chi_5 / (sound_speed * cell_i.density));
sum_5 += -0.5 * (cell_i.density * chi_2 * sound_speed - chi_5);
}
if (lambda_0 >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_0);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_0 * lambda_0);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
#ifdef DE
chi_ge = A * (del_m_i.gas_energy - ge_6) + B * ge_6;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
chi_scalar[i] = A * (del_m_i.scalar[i] - scalar_6[i]) + B * scalar_6[i];
}
#endif // SCALAR
sum_1 += chi_1 - chi_5 / (sound_speed * sound_speed);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif // SCALAR
}
if (lambda_p >= 0) {
Real const A = (0.5 * dtodx) * (lambda_p - lambda_p);
Real const B = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_p * lambda_p - lambda_p * lambda_p);
Real const chi_1 = A * (del_m_i.density - d_6) + B * d_6;
Real const chi_2 = A * (del_m_i.velocity_x - vx_6) + B * vx_6;
Real const chi_3 = A * (del_m_i.velocity_y - vy_6) + B * vy_6;
Real const chi_4 = A * (del_m_i.velocity_z - vz_6) + B * vz_6;
Real const chi_5 = A * (del_m_i.pressure - p_6) + B * p_6;
sum_1 += 0.5 * (cell_i.density * chi_2 / sound_speed + chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 + chi_5 / (sound_speed * cell_i.density));
sum_5 += 0.5 * (cell_i.density * chi_2 * sound_speed + chi_5);
}
// add the corrections to the initial guesses for the interface values
interface_L_iph.density += sum_1;
interface_L_iph.velocity_x += sum_2;
interface_L_iph.velocity_y += sum_3;
interface_L_iph.velocity_z += sum_4;
interface_L_iph.pressure += sum_5;
#ifdef DE
interface_L_iph.gas_energy += sum_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_L_iph.scalar[i] += sum_scalar[i];
}
#endif // SCALAR
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif // DE
#ifdef SCALAR
for (Real &val : sum_scalar) {
val = 0;
}
#endif // SCALAR
if (lambda_m <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_m);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_m * lambda_m);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
sum_1 += -0.5 * (cell_i.density * chi_2 / sound_speed - chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 - chi_5 / (sound_speed * cell_i.density));
sum_5 += -0.5 * (cell_i.density * chi_2 * sound_speed - chi_5);
}
if (lambda_0 <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_0);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_0 * lambda_0);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
#ifdef DE
chi_ge = C * (del_m_i.gas_energy + ge_6) + D * ge_6;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
chi_scalar[i] = C * (del_m_i.scalar[i] + scalar_6[i]) + D * scalar_6[i];
}
#endif // SCALAR
sum_1 += chi_1 - chi_5 / (sound_speed * sound_speed);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif // SCALAR
}
if (lambda_p <= 0) {
Real const C = (0.5 * dtodx) * (lambda_m - lambda_p);
Real const D = (1.0 / 3.0) * (dtodx) * (dtodx) * (lambda_m * lambda_m - lambda_p * lambda_p);
Real const chi_1 = C * (del_m_i.density + d_6) + D * d_6;
Real const chi_2 = C * (del_m_i.velocity_x + vx_6) + D * vx_6;
Real const chi_3 = C * (del_m_i.velocity_y + vy_6) + D * vy_6;
Real const chi_4 = C * (del_m_i.velocity_z + vz_6) + D * vz_6;
Real const chi_5 = C * (del_m_i.pressure + p_6) + D * p_6;
sum_1 += 0.5 * (cell_i.density * chi_2 / sound_speed + chi_5 / (sound_speed * sound_speed));
sum_2 += 0.5 * (chi_2 + chi_5 / (sound_speed * cell_i.density));
sum_5 += 0.5 * (cell_i.density * chi_2 * sound_speed + chi_5);
}
// add the corrections
interface_R_imh.density += sum_1;
interface_R_imh.velocity_x += sum_2;
interface_R_imh.velocity_y += sum_3;
interface_R_imh.velocity_z += sum_4;
interface_R_imh.pressure += sum_5;
#ifdef DE
interface_R_imh.gas_energy += sum_ge;
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
interface_R_imh.scalar[i] += sum_scalar[i];
}
#endif // SCALAR
// This is the end of the characteristic tracing
// enforce minimum values
interface_R_imh.density = fmax(interface_R_imh.density, (Real)TINY_NUMBER);
interface_L_iph.density = fmax(interface_L_iph.density, (Real)TINY_NUMBER);
interface_R_imh.pressure = fmax(interface_R_imh.pressure, (Real)TINY_NUMBER);
interface_L_iph.pressure = fmax(interface_L_iph.pressure, (Real)TINY_NUMBER);
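  // Floor the reconstructed density and pressure so that limiter overshoot or round-off cannot
  // hand a non-positive state to the downstream flux calculation.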
// Step 11 - Send final values back from kernel
  // Convert the left and right states from primitive to conserved variables and send the final values back from the kernel
// bounds_R refers to the right side of the i-1/2 interface
size_t id = cuda_utilities::compute1DIndex(xid, yid, zid, nx, ny);
reconstruction::Write_Data(interface_L_iph, dev_bounds_L, dev_conserved, id, n_cells, o1, o2, o3, gamma);
id = cuda_utilities::compute1DIndex(xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny);
reconstruction::Write_Data(interface_R_imh, dev_bounds_R, dev_conserved, id, n_cells, o1, o2, o3, gamma);
}
// =====================================================================================================================
// =====================================================================================================================
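// PPMC reconstruction for the Van Leer (predictor-corrector) integrator. Unlike the kernel above,
// this one performs no characteristic tracing: the VL predictor half step supplies the
// time-centering, so only the spatial reconstruction of the interface states is done here.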
__global__ __launch_bounds__(TPB) void PPMC_VL(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx,
int ny, int nz, Real gamma, int dir)
{
// get a thread ID
int const thread_id = threadIdx.x + blockIdx.x * blockDim.x;
int xid, yid, zid;
cuda_utilities::compute3DIndices(thread_id, nx, ny, xid, yid, zid);
// Ensure that we are only operating on cells that will be used
if (reconstruction::Thread_Guard<3>(nx, ny, nz, xid, yid, zid)) {
return;
}
// Compute the total number of cells
int const n_cells = nx * ny * nz;
// Set the field indices for the various directions
int o1, o2, o3;
switch (dir) {
case 0:
o1 = grid_enum::momentum_x;
o2 = grid_enum::momentum_y;
o3 = grid_enum::momentum_z;
break;
case 1:
o1 = grid_enum::momentum_y;
o2 = grid_enum::momentum_z;
o3 = grid_enum::momentum_x;
break;
case 2:
o1 = grid_enum::momentum_z;
o2 = grid_enum::momentum_x;
o3 = grid_enum::momentum_y;
break;
}
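// The permutation above maps the momentum component normal to the sweep onto o1, so the
// reconstruction below can always treat velocity_x as the normal velocity and velocity_y/velocity_z
// as the transverse components, regardless of dir.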
// load the 5-cell stencil into registers
// cell i
reconstruction::Primitive const cell_i =
reconstruction::Load_Data(dev_conserved, xid, yid, zid, nx, ny, n_cells, o1, o2, o3, gamma);
// cell i-1. The equality checks the direction and will subtract one from the correct direction
// im1 stands for "i minus 1"
reconstruction::Primitive const cell_im1 = reconstruction::Load_Data(
dev_conserved, xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// cell i+1. The equality checks the direction and adds one to the correct direction
// ip1 stands for "i plus 1"
reconstruction::Primitive const cell_ip1 = reconstruction::Load_Data(
dev_conserved, xid + int(dir == 0), yid + int(dir == 1), zid + int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// cell i-2. The equality checks the direction and will subtract two from the correct direction
// im2 stands for "i minus 2"
reconstruction::Primitive const cell_im2 =
reconstruction::Load_Data(dev_conserved, xid - 2 * int(dir == 0), yid - 2 * int(dir == 1),
zid - 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
// cell i+2. The equality checks the direction and adds two to the correct direction
// ip2 stands for "i plus 2"
reconstruction::Primitive const cell_ip2 =
reconstruction::Load_Data(dev_conserved, xid + 2 * int(dir == 0), yid + 2 * int(dir == 1),
zid + 2 * int(dir == 2), nx, ny, n_cells, o1, o2, o3, gamma);
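// Project the stencil onto the characteristic variables of cell i; applying the PPM limiter to the
// characteristic fields rather than to the primitives directly reduces spurious oscillations where
// several waves interact.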
// Convert to the characteristic variables
Real const sound_speed = hydro_utilities::Calc_Sound_Speed(cell_i.pressure, cell_i.density, gamma);
Real const sound_speed_squared = sound_speed * sound_speed;
#ifdef MHD
reconstruction::EigenVecs eigenvectors =
reconstruction::Compute_Eigenvectors(cell_i, sound_speed, sound_speed_squared, gamma);
#else
reconstruction::EigenVecs eigenvectors;
#endif // MHD
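// For pure hydro the eigenvector struct stays default-initialized: it is only filled in under MHD,
// so the hydro projection routines cannot depend on its contents.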
// Cell i
reconstruction::Characteristic const cell_i_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_i, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i-1
reconstruction::Characteristic const cell_im1_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_im1, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i-2
reconstruction::Characteristic const cell_im2_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_im2, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i+1
reconstruction::Characteristic const cell_ip1_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_ip1, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Cell i+2
reconstruction::Characteristic const cell_ip2_characteristic = reconstruction::Primitive_To_Characteristic(
cell_i, cell_ip2, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Compute the interface states for each field
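// PPM_Single_Variable fits a limited parabola through the five-cell stencil of each characteristic
// field and returns the monotonized left (i+1/2) and right (i-1/2) interface values for cell i.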
reconstruction::Characteristic interface_R_imh_characteristic, interface_L_iph_characteristic;
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a0, cell_im1_characteristic.a0, cell_i_characteristic.a0,
cell_ip1_characteristic.a0, cell_ip2_characteristic.a0,
interface_L_iph_characteristic.a0, interface_R_imh_characteristic.a0);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a1, cell_im1_characteristic.a1, cell_i_characteristic.a1,
cell_ip1_characteristic.a1, cell_ip2_characteristic.a1,
interface_L_iph_characteristic.a1, interface_R_imh_characteristic.a1);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a2, cell_im1_characteristic.a2, cell_i_characteristic.a2,
cell_ip1_characteristic.a2, cell_ip2_characteristic.a2,
interface_L_iph_characteristic.a2, interface_R_imh_characteristic.a2);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a3, cell_im1_characteristic.a3, cell_i_characteristic.a3,
cell_ip1_characteristic.a3, cell_ip2_characteristic.a3,
interface_L_iph_characteristic.a3, interface_R_imh_characteristic.a3);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a4, cell_im1_characteristic.a4, cell_i_characteristic.a4,
cell_ip1_characteristic.a4, cell_ip2_characteristic.a4,
interface_L_iph_characteristic.a4, interface_R_imh_characteristic.a4);
#ifdef MHD
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a5, cell_im1_characteristic.a5, cell_i_characteristic.a5,
cell_ip1_characteristic.a5, cell_ip2_characteristic.a5,
interface_L_iph_characteristic.a5, interface_R_imh_characteristic.a5);
reconstruction::PPM_Single_Variable(cell_im2_characteristic.a6, cell_im1_characteristic.a6, cell_i_characteristic.a6,
cell_ip1_characteristic.a6, cell_ip2_characteristic.a6,
interface_L_iph_characteristic.a6, interface_R_imh_characteristic.a6);
#endif // MHD
// Convert back to primitive variables
reconstruction::Primitive interface_L_iph = reconstruction::Characteristic_To_Primitive(
cell_i, interface_L_iph_characteristic, eigenvectors, sound_speed, sound_speed_squared, gamma);
reconstruction::Primitive interface_R_imh = reconstruction::Characteristic_To_Primitive(
cell_i, interface_R_imh_characteristic, eigenvectors, sound_speed, sound_speed_squared, gamma);
// Compute the interfaces for the variables that don't have characteristics
#ifdef DE
reconstruction::PPM_Single_Variable(cell_im2.gas_energy, cell_im1.gas_energy, cell_i.gas_energy, cell_ip1.gas_energy,
cell_ip2.gas_energy, interface_L_iph.gas_energy, interface_R_imh.gas_energy);
#endif // DE
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
reconstruction::PPM_Single_Variable(cell_im2.scalar[i], cell_im1.scalar[i], cell_i.scalar[i], cell_ip1.scalar[i],
cell_ip2.scalar[i], interface_L_iph.scalar[i], interface_R_imh.scalar[i]);
}
#endif // SCALAR
// enforce minimum values
interface_R_imh.density = fmax(interface_R_imh.density, (Real)TINY_NUMBER);
interface_L_iph.density = fmax(interface_L_iph.density, (Real)TINY_NUMBER);
interface_R_imh.pressure = fmax(interface_R_imh.pressure, (Real)TINY_NUMBER);
interface_L_iph.pressure = fmax(interface_L_iph.pressure, (Real)TINY_NUMBER);
// Step 11 - Send final values back from kernel
// Convert the left and right states from primitive to conserved variables and send them back from the kernel
// bounds_R refers to the right side of the i-1/2 interface
size_t id = cuda_utilities::compute1DIndex(xid, yid, zid, nx, ny);
reconstruction::Write_Data(interface_L_iph, dev_bounds_L, dev_conserved, id, n_cells, o1, o2, o3, gamma);
id = cuda_utilities::compute1DIndex(xid - int(dir == 0), yid - int(dir == 1), zid - int(dir == 2), nx, ny);
reconstruction::Write_Data(interface_R_imh, dev_bounds_R, dev_conserved, id, n_cells, o1, o2, o3, gamma);
}
// =====================================================================================================================