hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M) |
---|---|---|---|
4422a4fd31cd18d587969fe20392a911014939cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "particle.h"
#include "kernel_hip.cuh"
#include <math.h>
#include <stdio.h>
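// All-pairs N-body step: each thread owns one particle, accumulates the gravitational
// pull from every particle (a 1.0f floor on the distance acts as softening), folds it
// into the per-particle acceleration buffer scaled by TO * M, and advances the position
// by acceleration * DT. N_PARTICULE, TO, M and DT are assumed to be defined in the
// included headers (particle.h / kernel_hip.cuh).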
__global__ void kernel_updateGalaxy( float *m, float3 *p,float3 *acceleration, float3 *p_out ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j;
float3 acc;
if (i<N_PARTICULE)
{
acc=make_float3(0.0f,0.0f,0.0f);
//printf("m=%f , px=%f\n", list[i].m, list[i].px);
for (j=0; j<N_PARTICULE; j++){
float dx,dy, dz;
float dist, coef;
dx=p[j].x-p[i].x;
dy=p[j].y-p[i].y;
dz=p[j].z-p[i].z;
dist = sqrtf(dx*dx+dy*dy+dz*dz);
if (dist < 1.0f)
dist= 1.0f;
coef= m[j] / (dist* dist * dist) ;
acc.x += dx * coef;
acc.y += dy * coef;
acc.z += dz * coef;
}
acceleration[i].x+=acc.x*TO* M;
acceleration[i].y+=acc.y*TO* M;
acceleration[i].z+=acc.z*TO* M;
p_out[i].x=acceleration[i].x*DT+p[i].x;
p_out[i].y=acceleration[i].y*DT+p[i].y;
p_out[i].z=acceleration[i].z*DT+p[i].z;
}
}
void updateGalaxy( int nblocks, int nthreads, float *m, float3 *p,float3 *acceleration, float3 *p_out ) {
hipLaunchKernelGGL(( kernel_updateGalaxy), dim3(nblocks), dim3(nthreads), 0, 0, m , p, acceleration, p_out );
}
| 4422a4fd31cd18d587969fe20392a911014939cb.cu | #include "cuda.h"
#include "particle.h"
#include "kernel.cuh"
#include <math.h>
#include <stdio.h>
__global__ void kernel_updateGalaxy( float *m, float3 *p,float3 *acceleration, float3 *p_out ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j;
float3 acc;
if (i<N_PARTICULE)
{
acc=make_float3(0.0f,0.0f,0.0f);
//printf("m=%f , px=%f\n", list[i].m, list[i].px);
for (j=0; j<N_PARTICULE; j++){
float dx,dy, dz;
float dist, coef;
dx=p[j].x-p[i].x;
dy=p[j].y-p[i].y;
dz=p[j].z-p[i].z;
dist = sqrtf(dx*dx+dy*dy+dz*dz);
if (dist < 1.0f)
dist= 1.0f;
coef= m[j] / (dist* dist * dist) ;
acc.x += dx * coef;
acc.y += dy * coef;
acc.z += dz * coef;
}
acceleration[i].x+=acc.x*TO* M;
acceleration[i].y+=acc.y*TO* M;
acceleration[i].z+=acc.z*TO* M;
p_out[i].x=acceleration[i].x*DT+p[i].x;
p_out[i].y=acceleration[i].y*DT+p[i].y;
p_out[i].z=acceleration[i].z*DT+p[i].z;
}
}
void updateGalaxy( int nblocks, int nthreads, float *m, float3 *p,float3 *acceleration, float3 *p_out ) {
kernel_updateGalaxy<<<nblocks, nthreads>>>(m , p, acceleration, p_out );
}
|
4ebae7ff99608f38147c9e054e157e3586e0bc55.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <cmath>
#define M 2048
#define W 15
#define w 3
#define threshold 80
using namespace std;
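// One thread per output pixel: convolve the raw image with the W x W Gaussian in d_filter,
// store the result in d_smooth_image, then apply the 3 x 3 Sobel operators d_hx/d_hy to the
// smoothed image and threshold the gradient magnitude into d_edged_image. block_size is
// assumed to equal the launch blockDim in both axes. Note that the Sobel pass reads
// d_smooth_image values written by other threads without any synchronization, so pixels
// near block borders may observe stale data.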
__global__ void smoothening_kernel(float* d_filter,float* d_raw_image,float* d_hx,float* d_hy,float* d_gx,float* d_gy,float* d_smooth_image,float* d_edged_image,int block_size){
int Bx = blockIdx.x;
int By = blockIdx.y;
int Tx = threadIdx.x;
int Ty = threadIdx.y;
/* defining row and column indices to parse through the filters and image*/
int rowd = By* block_size + Ty;
int columd = Bx* block_size + Tx;
/*boundaries checking*/
int rr = rowd - W/2;
int cc = columd - W/2;
float acc = 0.0;
/*convolution for smoothening*/
for(int k = 0; k < W; k++ ){
for(int l = 0; l < W; l++){
if((rr + k) >= 0 && (rr + k) < M && (cc + l) >= 0 && (cc + l) < M){
acc += d_raw_image[(rr + k) * M + (cc + l)] * d_filter[k * W + l];
}
}
d_smooth_image[rowd * M + columd] = acc;
}
/*convolution for edge detection */
int mm = rowd - w/2;
int nn = columd - w/2;
float acc1 = 0.0;
float acc2 = 0.0;
for(int k = 0; k < w; k++ ){
for(int l = 0; l < w; l++){
if((mm + k) >= 0 && (mm + k) < M && (nn + l) >= 0 && (nn + l) < M){
acc1 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hx[k * w + l];
acc2 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hy[k * w + l];
}
}
d_gx[rowd * M + columd] = acc1;
d_gy[rowd * M + columd] = acc2;
}
// gradient magnitude of spatial domains
d_edged_image[rowd * M + columd] = sqrt(pow(d_gx[rowd * M + columd], 2) + pow(d_gy[rowd * M + columd], 2));
if(d_edged_image[rowd * M + columd] > threshold){d_edged_image[rowd * M + columd] = 255;}
else{d_edged_image[rowd * M + columd] = 0;}
}
int main(int argc, char* argv[]){
int block_size = atoi(argv[1]);
float h_filter[W][W]; //Gaussian filter
float h_x[w][w] = {{-1.0,0.0,1.0},{-2.0,0.0,2.0},{-1.0,0.0,1.0}}; // Sobel operator
float h_y[w][w] = {{-1.0,-2.0,-1.0},{0.0,0.0,0.0},{1.0,2.0,1.0}}; //Sobel operator
double sigma = 1.5;
float P = 1.0/(2* M_PI * sigma*sigma);
float Q = 2.0* M_PI * sigma*sigma;
float sum = 0.0;
long image_size;
size_t elements;
int L = (W-1)/2;
/*initializing gaussian filter*/
for(int x = -W/2; x <= W/2; x++){
for(int y = -W/2; y <= W/2; y++){
int I = (x+ W/2) - L;
int J = (y+ W/2) - L;
h_filter[x + W/2][y + W/2] = P*(exp(-(I*I + J*J)/Q));
sum += h_filter[x + W/2][y + W/2];
}
}
for(int i = 0; i < W; i++){
for(int j = 0; j < W; j++){
h_filter[i][j]/= sum;
}
}
// verify gaussian filter
cout<<"guassian filter" <<endl;
for(int q = 0; q < 15; q++){
for(int z = 0; z <15; z++){
cout<<h_filter[q][z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
FILE* fp_in, *fp_out1, *fp_out2;
fp_in = fopen ("Rainier2048_noise.bin","rb");
if(fp_in == NULL){cout<<"FILE ERROR!"<<endl;
exit(1); }
//obtain file size
fseek(fp_in, 0, SEEK_END);
image_size = ftell(fp_in);
rewind(fp_in);
// allocate buffer of image size
unsigned char* buffer = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
unsigned char* buffer1 = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
//copy file into buffer
elements = fread(buffer, sizeof(unsigned char), image_size, fp_in);
if(elements != image_size){cout<<"READ ERROR! "<<endl;
exit(2);}
fclose(fp_in);
float* fptr = (float*)malloc(sizeof(float)* M * M);
//typecast from char to float
for(int row = 0; row < M; row++){
for(int col = 0; col < M; col++){
fptr[row * M + col] = (float) buffer[row * M + col];
}
}
cout<<"raw image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<buffer[q * M + z]<<" ";
}
cout<<endl;
}
cout<<"raw image of float type" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<fptr[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
float* smooth_image = (float*)malloc(sizeof(float)* M * M);
float* edged_image = (float*)malloc(sizeof(float)* M * M);
float* d_gx;
float* d_gy;
float* d_hx;
float* d_hy;
float* d_raw_image;
float* d_filter;
float* d_smooth_image;
float* d_edged_image;
hipMalloc((void**)&d_hx,sizeof(float)* w * w);
hipMalloc((void**)&d_hy,sizeof(float)* w * w);
hipMalloc((void**)&d_filter,sizeof(float)* W * W);
hipMalloc((void**)&d_raw_image,sizeof(float)* M * M);
hipMalloc((void**)&d_smooth_image,sizeof(float)* M * M);
hipMalloc((void**)&d_edged_image,sizeof(float)* M * M);
hipMalloc((void**)&d_gx,sizeof(float)* M * M);
hipMalloc((void**)&d_gy,sizeof(float)* M * M);
/* measuring execution time */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/*copy image and filters from host to device */
hipMemcpy(d_raw_image, fptr, sizeof(float) * M * M, hipMemcpyHostToDevice);
hipMemcpy(d_filter,h_filter , sizeof(float) * W * W, hipMemcpyHostToDevice);
hipMemcpy(d_hx, h_x , sizeof(float) * w * w, hipMemcpyHostToDevice);
hipMemcpy(d_hy, h_y , sizeof(float) * w * w, hipMemcpyHostToDevice);
/*define block size and grid size and invoke kernel*/
dim3 threadsPerBlock(block_size, block_size);
int numblocks = M / block_size;
dim3 blocksPerGrid(numblocks, numblocks);
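// numblocks uses integer division, so block_size is assumed to divide M (2048) evenly;
// otherwise the grid would not cover the whole image.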
hipEventRecord(start);
hipLaunchKernelGGL(( smoothening_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_filter,d_raw_image,d_hx,d_hy,d_gx,d_gy,d_smooth_image,d_edged_image,block_size);
/* copy results from device to host */
hipMemcpy(smooth_image, d_smooth_image, sizeof(float) * M * M, hipMemcpyDeviceToHost);
hipMemcpy(edged_image, d_edged_image, sizeof(float) * M * M, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0.0;
hipEventElapsedTime(&milliseconds, start, stop);
cout<<"he parallel execution time for block size "<< block_size << " is "<< milliseconds <<" secs" << endl;
/* write edge detected image to file*/
for(int row = 0; row < M; row++){
for(int col = 0; col < M; col++){
buffer[row * M + col] = (unsigned char) smooth_image[row * M + col];
buffer1[row * M + col] = (unsigned char) edged_image[row * M + col];
}
}
cout<<"smoothened_image buffered"<<endl;
for(int ir = 1024; ir < 1034; ir++){
for(int ic = 1525; ic < 1535; ic++){
cout<< *(buffer + ir * M + ic) <<" ";
}
cout<<endl;
}
cout<<" "<<endl;
fp_out1 = fopen("smoothened_image_cuda.bin", "wb");
fwrite(buffer, sizeof(unsigned char), image_size, fp_out1);
fclose(fp_out1);
fp_out2 = fopen("Edge_detected_image_cuda.bin", "wb");
fwrite(buffer1,sizeof(unsigned char), image_size, fp_out2);
fclose(fp_out2);
cout<<"smoothened image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<smooth_image[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
cout<<"edged_image buffered"<<endl;
for(int ir = 1024; ir < 1034; ir++){
for(int ic = 1525; ic < 1535; ic++){
cout<< *(buffer1 + ir * M + ic) <<" ";
}
cout<<endl;
}
cout<<" "<<endl;
cout<<"edged_image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<edged_image[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
/* free device memory*/
hipFree(d_raw_image);
hipFree(d_hx);
hipFree(d_hy);
hipFree(d_smooth_image);
hipFree(d_edged_image);
hipFree(d_gx);
hipFree(d_gy);
hipFree(d_filter);
/*free host memory*/
free(fptr);
free(smooth_image);
free(buffer);
free(buffer1);
free(edged_image);
return 0;
}
| 4ebae7ff99608f38147c9e054e157e3586e0bc55.cu | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <cmath>
#define M 2048
#define W 15
#define w 3
#define threshold 80
using namespace std;
__global__ void smoothening_kernel(float* d_filter,float* d_raw_image,float* d_hx,float* d_hy,float* d_gx,float* d_gy,float* d_smooth_image,float* d_edged_image,int block_size){
int Bx = blockIdx.x;
int By = blockIdx.y;
int Tx = threadIdx.x;
int Ty = threadIdx.y;
/* defining row and column indices to parse through the filters and image*/
int rowd = By* block_size + Ty;
int columd = Bx* block_size + Tx;
/*boundaries checking*/
int rr = rowd - W/2;
int cc = columd - W/2;
float acc = 0.0;
/*convolution for smoothening*/
for(int k = 0; k < W; k++ ){
for(int l = 0; l < W; l++){
if((rr + k) >= 0 && (rr + k) < M && (cc + l) >= 0 && (cc + l) < M){
acc += d_raw_image[(rr + k) * M + (cc + l)] * d_filter[k * W + l];
}
}
d_smooth_image[rowd * M + columd] = acc;
}
/*convolution for edge detection */
int mm = rowd - w/2;
int nn = columd - w/2;
float acc1 = 0.0;
float acc2 = 0.0;
for(int k = 0; k < w; k++ ){
for(int l = 0; l < w; l++){
if((mm + k) >= 0 && (mm + k) < M && (nn + l) >= 0 && (nn + l) < M){
acc1 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hx[k * w + l];
acc2 += d_smooth_image[(mm + k) * M + (nn + l)] * d_hy[k * w + l];
}
}
d_gx[rowd * M + columd] = acc1;
d_gy[rowd * M + columd] = acc2;
}
// gradient magnitude of spatial domains
d_edged_image[rowd * M + columd] = sqrt(pow(d_gx[rowd * M + columd], 2) + pow(d_gy[rowd * M + columd], 2));
if(d_edged_image[rowd * M + columd] > threshold){d_edged_image[rowd * M + columd] = 255;}
else{d_edged_image[rowd * M + columd] = 0;}
}
int main(int argc, char* argv[]){
int block_size = atoi(argv[1]);
float h_filter[W][W]; //Gaussian filter
float h_x[w][w] = {{-1.0,0.0,1.0},{-2.0,0.0,2.0},{-1.0,0.0,1.0}}; // Sobel operator
float h_y[w][w] = {{-1.0,-2.0,-1.0},{0.0,0.0,0.0},{1.0,2.0,1.0}}; //Sobel operator
double sigma = 1.5;
float P = 1.0/(2* M_PI * sigma*sigma);
float Q = 2.0* M_PI * sigma*sigma;
float sum = 0.0;
long image_size;
size_t elements;
int L = (W-1)/2;
/*initializing gaussian filter*/
for(int x = -W/2; x <= W/2; x++){
for(int y = -W/2; y <= W/2; y++){
int I = (x+ W/2) - L;
int J = (y+ W/2) - L;
h_filter[x + W/2][y + W/2] = P*(exp(-(I*I + J*J)/Q));
sum += h_filter[x + W/2][y + W/2];
}
}
for(int i = 0; i < W; i++){
for(int j = 0; j < W; j++){
h_filter[i][j]/= sum;
}
}
// verify gaussian filter
cout<<"guassian filter" <<endl;
for(int q = 0; q < 15; q++){
for(int z = 0; z <15; z++){
cout<<h_filter[q][z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
FILE* fp_in, *fp_out1, *fp_out2;
fp_in = fopen ("Rainier2048_noise.bin","rb");
if(fp_in == NULL){cout<<"FILE ERROR!"<<endl;
exit(1); }
//obtain file size
fseek(fp_in, 0, SEEK_END);
image_size = ftell(fp_in);
rewind(fp_in);
// allocate buffer of image size
unsigned char* buffer = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
unsigned char* buffer1 = (unsigned char*)malloc(sizeof(unsigned char) * image_size);
//copy file into buffer
elements = fread(buffer, sizeof(unsigned char), image_size, fp_in);
if(elements != image_size){cout<<"READ ERROR! "<<endl;
exit(2);}
fclose(fp_in);
float* fptr = (float*)malloc(sizeof(float)* M * M);
//typecast from char to float
for(int row = 0; row < M; row++){
for(int col = 0; col < M; col++){
fptr[row * M + col] = (float) buffer[row * M + col];
}
}
cout<<"raw image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<buffer[q * M + z]<<" ";
}
cout<<endl;
}
cout<<"raw image of float type" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<fptr[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
float* smooth_image = (float*)malloc(sizeof(float)* M * M);
float* edged_image = (float*)malloc(sizeof(float)* M * M);
float* d_gx;
float* d_gy;
float* d_hx;
float* d_hy;
float* d_raw_image;
float* d_filter;
float* d_smooth_image;
float* d_edged_image;
cudaMalloc((void**)&d_hx,sizeof(float)* w * w);
cudaMalloc((void**)&d_hy,sizeof(float)* w * w);
cudaMalloc((void**)&d_filter,sizeof(float)* W * W);
cudaMalloc((void**)&d_raw_image,sizeof(float)* M * M);
cudaMalloc((void**)&d_smooth_image,sizeof(float)* M * M);
cudaMalloc((void**)&d_edged_image,sizeof(float)* M * M);
cudaMalloc((void**)&d_gx,sizeof(float)* M * M);
cudaMalloc((void**)&d_gy,sizeof(float)* M * M);
/* measuring execution time */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*copy image and filters from host to device */
cudaMemcpy(d_raw_image, fptr, sizeof(float) * M * M, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter,h_filter , sizeof(float) * W * W, cudaMemcpyHostToDevice);
cudaMemcpy(d_hx, h_x , sizeof(float) * w * w, cudaMemcpyHostToDevice);
cudaMemcpy(d_hy, h_y , sizeof(float) * w * w, cudaMemcpyHostToDevice);
/*define block size and grid size and invoke kernel*/
dim3 threadsPerBlock(block_size, block_size);
int numblocks = M / block_size;
dim3 blocksPerGrid(numblocks, numblocks);
cudaEventRecord(start);
smoothening_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_filter,d_raw_image,d_hx,d_hy,d_gx,d_gy,d_smooth_image,d_edged_image,block_size);
/* copy results from device to host */
cudaMemcpy(smooth_image, d_smooth_image, sizeof(float) * M * M, cudaMemcpyDeviceToHost);
cudaMemcpy(edged_image, d_edged_image, sizeof(float) * M * M, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0.0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout<<"he parallel execution time for block size "<< block_size << " is "<< milliseconds <<" secs" << endl;
/* write edge detected image to file*/
for(int row = 0; row < M; row++){
for(int col = 0; col < M; col++){
buffer[row * M + col] = (unsigned char) smooth_image[row * M + col];
buffer1[row * M + col] = (unsigned char) edged_image[row * M + col];
}
}
cout<<"smoothened_image buffered"<<endl;
for(int ir = 1024; ir < 1034; ir++){
for(int ic = 1525; ic < 1535; ic++){
cout<< *(buffer + ir * M + ic) <<" ";
}
cout<<endl;
}
cout<<" "<<endl;
fp_out1 = fopen("smoothened_image_cuda.bin", "wb");
fwrite(buffer, sizeof(unsigned char), image_size, fp_out1);
fclose(fp_out1);
fp_out2 = fopen("Edge_detected_image_cuda.bin", "wb");
fwrite(buffer1,sizeof(unsigned char), image_size, fp_out2);
fclose(fp_out2);
cout<<"smoothened image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<smooth_image[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
cout<<"edged_image buffered"<<endl;
for(int ir = 1024; ir < 1034; ir++){
for(int ic = 1525; ic < 1535; ic++){
cout<< *(buffer1 + ir * M + ic) <<" ";
}
cout<<endl;
}
cout<<" "<<endl;
cout<<"edged_image" <<endl;
for(int q = 1024; q < 1034; q++){
for(int z = 1525; z <1535; z++){
cout<<edged_image[q * M + z]<<" ";
}
cout<<endl;
}
cout<<" "<<endl;
/* free device memory*/
cudaFree(d_raw_image);
cudaFree(d_hx);
cudaFree(d_hy);
cudaFree(d_smooth_image);
cudaFree(d_edged_image);
cudaFree(d_gx);
cudaFree(d_gy);
cudaFree(d_filter);
/*free host memory*/
free(fptr);
free(smooth_image);
free(buffer);
free(buffer1);
free(edged_image);
return 0;
}
|
bc90b66861b5db6239d1e76002a3982ed9aaf0a6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <vector>
#include "kmeans/kmeans.cu"
namespace ML {
using namespace MLCommon;
template <typename T>
struct KmeansInputs {
int n_clusters;
T tol;
int n_row;
int n_col;
};
template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
int m = params.n_row;
int n = params.n_col;
int k = params.n_clusters;
// make space for outputs : d_centroids, d_labels
// and reference output : d_labels_ref
allocate(d_srcdata, n * m);
allocate(d_labels, m);
allocate(d_labels_ref, m);
allocate(d_centroids, k * n);
allocate(d_centroids_ref, k * n);
// make testdata on host
std::vector<T> h_srcdata = {1.0, 1.0, 3.0, 4.0, 1.0, 2.0, 2.0, 3.0};
h_srcdata.resize(n * m);
updateDevice(d_srcdata, h_srcdata.data(), m * n, stream);
// make and assign reference output
std::vector<int> h_labels_ref = {0, 1, 0, 1};
h_labels_ref.resize(m);
updateDevice(d_labels_ref, h_labels_ref.data(), m, stream);
std::vector<T> h_centroids_ref = {1.0, 1.5, 2.5, 3.5};
h_centroids_ref.resize(k * n);
updateDevice(d_centroids_ref, h_centroids_ref.data(), k * n, stream);
cumlHandle handle;
handle.setStream(stream);
// The actual kmeans api calls
// fit
kmeans::fit_predict(handle, k, metric, init, max_iterations, params.tol,
seed, d_srcdata, m, n, d_centroids, d_labels);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
basicTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(d_srcdata));
CUDA_CHECK(hipFree(d_labels));
CUDA_CHECK(hipFree(d_centroids));
CUDA_CHECK(hipFree(d_labels_ref));
CUDA_CHECK(hipFree(d_centroids_ref));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
KmeansInputs<T> params;
T *d_srcdata;
int *d_labels, *d_labels_ref;
T *d_centroids, *d_centroids_ref;
int verbose = 0;
int seed = 0;
int max_iterations = 300;
kmeans::InitMethod init = kmeans::InitMethod::Random;
int metric = 1;
hipStream_t stream;
};
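// Test inputs follow the KmeansInputs field order {n_clusters, tol, n_row, n_col},
// i.e. two clusters fitted to four 2-D points.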
const std::vector<KmeansInputs<float>> inputsf2 = {{2, 0.05f, 4, 2}};
const std::vector<KmeansInputs<double>> inputsd2 = {{2, 0.05, 4, 2}};
// FIXME: These tests are disabled due to being too sensitive to RNG:
// https://github.com/rapidsai/cuml/issues/71
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) {
ASSERT_TRUE(devArrMatch(d_labels_ref, d_labels, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(devArrMatch(d_centroids_ref, d_centroids,
params.n_clusters * params.n_col,
CompareApproxAbs<float>(params.tol)));
}
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) {
ASSERT_TRUE(devArrMatch(d_labels_ref, d_labels, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(devArrMatch(d_centroids_ref, d_centroids,
params.n_clusters * params.n_col,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD,
::testing::ValuesIn(inputsd2));
} // end namespace ML
| bc90b66861b5db6239d1e76002a3982ed9aaf0a6.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <vector>
#include "kmeans/kmeans.cu"
namespace ML {
using namespace MLCommon;
template <typename T>
struct KmeansInputs {
int n_clusters;
T tol;
int n_row;
int n_col;
};
template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
int m = params.n_row;
int n = params.n_col;
int k = params.n_clusters;
// make space for outputs : d_centroids, d_labels
// and reference output : d_labels_ref
allocate(d_srcdata, n * m);
allocate(d_labels, m);
allocate(d_labels_ref, m);
allocate(d_centroids, k * n);
allocate(d_centroids_ref, k * n);
// make testdata on host
std::vector<T> h_srcdata = {1.0, 1.0, 3.0, 4.0, 1.0, 2.0, 2.0, 3.0};
h_srcdata.resize(n * m);
updateDevice(d_srcdata, h_srcdata.data(), m * n, stream);
// make and assign reference output
std::vector<int> h_labels_ref = {0, 1, 0, 1};
h_labels_ref.resize(m);
updateDevice(d_labels_ref, h_labels_ref.data(), m, stream);
std::vector<T> h_centroids_ref = {1.0, 1.5, 2.5, 3.5};
h_centroids_ref.resize(k * n);
updateDevice(d_centroids_ref, h_centroids_ref.data(), k * n, stream);
cumlHandle handle;
handle.setStream(stream);
// The actual kmeans api calls
// fit
kmeans::fit_predict(handle, k, metric, init, max_iterations, params.tol,
seed, d_srcdata, m, n, d_centroids, d_labels);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
basicTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(d_srcdata));
CUDA_CHECK(cudaFree(d_labels));
CUDA_CHECK(cudaFree(d_centroids));
CUDA_CHECK(cudaFree(d_labels_ref));
CUDA_CHECK(cudaFree(d_centroids_ref));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
KmeansInputs<T> params;
T *d_srcdata;
int *d_labels, *d_labels_ref;
T *d_centroids, *d_centroids_ref;
int verbose = 0;
int seed = 0;
int max_iterations = 300;
kmeans::InitMethod init = kmeans::InitMethod::Random;
int metric = 1;
cudaStream_t stream;
};
const std::vector<KmeansInputs<float>> inputsf2 = {{2, 0.05f, 4, 2}};
const std::vector<KmeansInputs<double>> inputsd2 = {{2, 0.05, 4, 2}};
// FIXME: These tests are disabled due to being too sensitive to RNG:
// https://github.com/rapidsai/cuml/issues/71
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) {
ASSERT_TRUE(devArrMatch(d_labels_ref, d_labels, params.n_row,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(devArrMatch(d_centroids_ref, d_centroids,
params.n_clusters * params.n_col,
CompareApproxAbs<float>(params.tol)));
}
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) {
ASSERT_TRUE(devArrMatch(d_labels_ref, d_labels, params.n_row,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(devArrMatch(d_centroids_ref, d_centroids,
params.n_clusters * params.n_col,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD,
::testing::ValuesIn(inputsd2));
} // end namespace ML
|
194dfc5e2db011f1a30512636d138c12ba64bc36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_x;
int xdim0_initialise_chunk_kernel_x_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_x;
int xdim1_initialise_chunk_kernel_x_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_x;
int xdim2_initialise_chunk_kernel_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_x*(y))
#define OPS_ACC1(x,y) (x+xdim1_initialise_chunk_kernel_x*(y))
#define OPS_ACC2(x,y) (x+xdim2_initialise_chunk_kernel_x*(y))
//user function
__device__
void initialise_chunk_kernel_x_gpu(double *vertexx, const int *xx, double *vertexdx) {
int x_min=field.x_min-2;
double min_x, d_x;
d_x = (grid.xmax - grid.xmin)/(double)grid.x_cells;
min_x=grid.xmin+d_x*field.left;
vertexx[OPS_ACC0(0,0)] = min_x + d_x * (xx[OPS_ACC1(0,0)] - x_min);
vertexdx[OPS_ACC2(0,0)] = (double)d_x;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
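// OPS-generated wrapper: each thread handles one (idx_x, idx_y) cell. The y-stride
// multipliers are 0, so every row indexes the same data along x, consistent with
// vertexx, xx and vertexdx being 1-D datasets. The grid and field objects referenced
// in the user function above are assumed to be OPS global constants defined elsewhere
// in the generated code.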
__global__ void ops_initialise_chunk_kernel_x(
double* __restrict arg0,
const int* __restrict arg1,
double* __restrict arg2,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_initialise_chunk_kernel_x;
arg1 += idx_x * 1*1 + idx_y * 0*1 * xdim1_initialise_chunk_kernel_x;
arg2 += idx_x * 1*1 + idx_y * 0*1 * xdim2_initialise_chunk_kernel_x;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_x_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_initialise_chunk_kernel_x_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,10)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(10,"initialise_chunk_kernel_x");
OPS_kernels[10].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
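// The leading dimensions are cached in __constant__ symbols and re-uploaded with
// hipMemcpyToSymbol only when they differ from the values used at the previous launch.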
if (xdim0 != xdim0_initialise_chunk_kernel_x_h || xdim1 != xdim1_initialise_chunk_kernel_x_h || xdim2 != xdim2_initialise_chunk_kernel_x_h) {
hipMemcpyToSymbol( xdim0_initialise_chunk_kernel_x, &xdim0, sizeof(int) );
xdim0_initialise_chunk_kernel_x_h = xdim0;
hipMemcpyToSymbol( xdim1_initialise_chunk_kernel_x, &xdim1, sizeof(int) );
xdim1_initialise_chunk_kernel_x_h = xdim1;
hipMemcpyToSymbol( xdim2_initialise_chunk_kernel_x, &xdim2, sizeof(int) );
xdim2_initialise_chunk_kernel_x_h = xdim2;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_x), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (int *)p_a[1],
(double *)p_a[2],x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[10].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 10;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 10;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(10,"initialise_chunk_kernel_x");
}
ops_enqueue_kernel(desc);
}
#endif
| 194dfc5e2db011f1a30512636d138c12ba64bc36.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_x;
int xdim0_initialise_chunk_kernel_x_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_x;
int xdim1_initialise_chunk_kernel_x_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_x;
int xdim2_initialise_chunk_kernel_x_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_x*(y))
#define OPS_ACC1(x,y) (x+xdim1_initialise_chunk_kernel_x*(y))
#define OPS_ACC2(x,y) (x+xdim2_initialise_chunk_kernel_x*(y))
//user function
__device__
void initialise_chunk_kernel_x_gpu(double *vertexx, const int *xx, double *vertexdx) {
int x_min=field.x_min-2;
double min_x, d_x;
d_x = (grid.xmax - grid.xmin)/(double)grid.x_cells;
min_x=grid.xmin+d_x*field.left;
vertexx[OPS_ACC0(0,0)] = min_x + d_x * (xx[OPS_ACC1(0,0)] - x_min);
vertexdx[OPS_ACC2(0,0)] = (double)d_x;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_initialise_chunk_kernel_x(
double* __restrict arg0,
const int* __restrict arg1,
double* __restrict arg2,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_initialise_chunk_kernel_x;
arg1 += idx_x * 1*1 + idx_y * 0*1 * xdim1_initialise_chunk_kernel_x;
arg2 += idx_x * 1*1 + idx_y * 0*1 * xdim2_initialise_chunk_kernel_x;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_x_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_initialise_chunk_kernel_x_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,10)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(10,"initialise_chunk_kernel_x");
OPS_kernels[10].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_x_h || xdim1 != xdim1_initialise_chunk_kernel_x_h || xdim2 != xdim2_initialise_chunk_kernel_x_h) {
cudaMemcpyToSymbol( xdim0_initialise_chunk_kernel_x, &xdim0, sizeof(int) );
xdim0_initialise_chunk_kernel_x_h = xdim0;
cudaMemcpyToSymbol( xdim1_initialise_chunk_kernel_x, &xdim1, sizeof(int) );
xdim1_initialise_chunk_kernel_x_h = xdim1;
cudaMemcpyToSymbol( xdim2_initialise_chunk_kernel_x, &xdim2, sizeof(int) );
xdim2_initialise_chunk_kernel_x_h = xdim2;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_initialise_chunk_kernel_x<<<grid, tblock >>> ( (double *)p_a[0], (int *)p_a[1],
(double *)p_a[2],x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[10].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[10].mpi_time += t2-t1;
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[10].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_initialise_chunk_kernel_x(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 10;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 10;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->function = ops_par_loop_initialise_chunk_kernel_x_execute;
if (OPS_diags > 1) {
ops_timing_realloc(10,"initialise_chunk_kernel_x");
}
ops_enqueue_kernel(desc);
}
#endif
|
bee0fff7d48d2b86cff21b3d6d58a73bbe5b2072.hip | // !!! This is a file automatically generated by hipify!!!
#include <moderngpu/transform.hxx> // for transform.
#include <cstdio>
#include <time.h>
int main(int argc, char** argv) {
// Create an instance of an object that implements context_t.
// context_t is an abstract base class that wraps basic CUDA runtime
// services like hipMalloc and hipFree.
// standard_context_t is the trivial implementation of this abstract base
// class. You can derive context_t and hook it up to your own memory
// allocators, as CUDA's built-in allocator is very slow.
mgpu::standard_context_t context;
// Print the local time from GPU threads.
time_t cur_time;
time(&cur_time);
tm t = *localtime(&cur_time);
// Define a CUDA kernel with closure. Tag it with MGPU_DEVICE and compile
// with --expt-extended-lambda in CUDA 7.5 to run it on the GPU.
auto k = [=] MGPU_DEVICE(int index) {
// This gets run on the GPU. Simply by referencing t.tm_year inside
// the lambda, the time is copied from its enclosing scope on the host
// into GPU constant memory and made available to the kernel.
// Adjust for daylight savings.
int hour = (t.tm_hour + (t.tm_isdst ? 0 : 11)) % 12;
if(!hour) hour = 12;
// Use CUDA's printf. It won't be shown until the context.synchronize()
// is called.
printf("Thread %d says the year is %d. The time is %d:%2d.\n",
index, 1900 + t.tm_year, hour, t.tm_min);
};
// Run kernel k with 10 GPU threads. We could even define the lambda
// inside the first argument of transform and not even name it.
mgpu::transform(k, 10, context);
// Synchronize the device to print the output.
context.synchronize();
return 0;
} | bee0fff7d48d2b86cff21b3d6d58a73bbe5b2072.cu | #include <moderngpu/transform.hxx> // for transform.
#include <cstdio>
#include <time.h>
int main(int argc, char** argv) {
// Create an instance of an object that implements context_t.
// context_t is an abstract base class that wraps basic CUDA runtime
// services like cudaMalloc and cudaFree.
// standard_context_t is the trivial implementation of this abstract base
// class. You can derive context_t and hook it up to your own memory
// allocators, as CUDA's built-in allocator is very slow.
mgpu::standard_context_t context;
// Print the local time from GPU threads.
time_t cur_time;
time(&cur_time);
tm t = *localtime(&cur_time);
// Define a CUDA kernel with closure. Tag it with MGPU_DEVICE and compile
// with --expt-extended-lambda in CUDA 7.5 to run it on the GPU.
auto k = [=] MGPU_DEVICE(int index) {
// This gets run on the GPU. Simply by referencing t.tm_year inside
// the lambda, the time is copied from its enclosing scope on the host
// into GPU constant memory and made available to the kernel.
// Adjust for daylight savings.
int hour = (t.tm_hour + (t.tm_isdst ? 0 : 11)) % 12;
if(!hour) hour = 12;
// Use CUDA's printf. It won't be shown until the context.synchronize()
// is called.
printf("Thread %d says the year is %d. The time is %d:%2d.\n",
index, 1900 + t.tm_year, hour, t.tm_min);
};
// Run kernel k with 10 GPU threads. We could even define the lambda
// inside the first argument of transform and not even name it.
mgpu::transform(k, 10, context);
// Synchronize the device to print the output.
context.synchronize();
return 0;
} |
7ba5ef7a332871d8f4ce2e5d2e27f2afce164d6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
#define TOTAL_THREADS 1024
inline int opt_n_threads(int work_size) {
const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
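// Farthest point sampling: index 0 is selected first; each subsequent iteration picks the
// point whose squared distance to the nearest already-selected point (tracked per point in
// temp) is largest. One block processes one batch element; threads stride over the N points,
// and the shared dists/dists_i arrays below are reduced via __update to find the argmax.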
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(int b, int n, int m,
const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp, int *idxs, hipStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 7ba5ef7a332871d8f4ce2e5d2e27f2afce164d6b.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "sampling_gpu.h"
#define TOTAL_THREADS 1024
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(int b, int n, int m,
const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0)
idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0)
idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp, int *idxs, cudaStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 512:
furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 256:
furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 128:
furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 64:
furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 32:
furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 16:
furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 8:
furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 4:
furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 2:
furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
case 1:
furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
default:
furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
rt_common.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2009-2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Common functionality for ray tracing based framework specializations.
"Massively Parallel Hierarchical Scene Sorting with Applications in Rendering",
Marek Vinkler, Michal Hapala, Jiri Bittner and Vlastimil Havran,
Computer Graphics Forum 2012
*/
#pragma once
#include "rt_common.cuh"
#include "tri_box_overlap.cuh"
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE
__device__ __forceinline__ TaskType taskChooseScanType(int unfinished)
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS1;
#elif SCAN_TYPE == 1
if(unfinished < 8) // Value of 8 corresponds to 256 items where there is a crossover between naive and Harris
return TaskType_Sort_PPS1;
else
return TaskType_Sort_PPS1_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT1;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE for PPS1
__device__ __forceinline__ TaskType taskChooseScanType1()
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS1;
#elif SCAN_TYPE == 1
return TaskType_Sort_PPS1_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT1;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the AABB_TYPE
__device__ __forceinline__ TaskType taskChooseAABBType()
{
#if AABB_TYPE < 3
return TaskType_AABB_Min;
#elif AABB_TYPE == 3
return TaskType_AABB;
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE for PPS1
__device__ __forceinline__ TaskType taskChooseScanType2()
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS2;
#elif SCAN_TYPE == 1
return TaskType_Sort_PPS2_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT2;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Fetches ray from global memory
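// Ray layout used here: 32-byte records, a float4 holding (origin.xyz, tmin)
// followed by a float4 holding (direction.xyz, tmax).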
__device__ __forceinline__ void taskFetchRay(hipDeviceptr_t rays, int rayIdx, float3 &orig, float3 &dir, float &tmin, float &tmax)
{
float4 o = *((float4*)(rays + rayIdx * 32 + 0));
float4 d = *((float4*)(rays + rayIdx * 32 + 16));
orig = make_float3(o);
tmin = o.w;
dir = make_float3(d);
tmax = d.w;
}
//------------------------------------------------------------------------
// Fetches ray from global memory
__device__ __forceinline__ void taskFetchRayVolatile(hipDeviceptr_t rays, int rayIdx, float3 &orig, float3 &dir, float &tmin, float &tmax)
{
// We must read data as volatile or we can get deprecated data
volatile float4 *po = (volatile float4*)(rays + rayIdx * 32 + 0);
volatile float4 *pd = (volatile float4*)(rays + rayIdx * 32 + 16);
orig.x = po->x, orig.y = po->y, orig.z = po->z;
dir.x = pd->x, dir.y = pd->y, dir.z = pd->z;
tmin = po->w;
tmax = pd->w;
}
//------------------------------------------------------------------------
// Fetches triangle from global memory
__device__ __forceinline__ void taskFetchTri(hipDeviceptr_t tris, int triIdx, float3 &v0, float3 &v1, float3 &v2)
{
#if 1
v0 = make_float3(tex1Dfetch(t_trisA, triIdx + 0));
v1 = make_float3(tex1Dfetch(t_trisA, triIdx + 1));
v2 = make_float3(tex1Dfetch(t_trisA, triIdx + 2));
#elif 0
v0 = make_float3(((float4*)tris)[triIdx + 0]);
v1 = make_float3(((float4*)tris)[triIdx + 1]);
v2 = make_float3(((float4*)tris)[triIdx + 2]);
#else
v0 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 0]));
v1 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 1]));
v2 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 2]));
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory
__device__ __forceinline__ void taskFetchNodeAddr(hipDeviceptr_t nodes, int nodeIdx, CudaBVHNode &node)
{
#if 0
// We must read data as volatile or we can get deprecated data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
#elif 0
hipDeviceptr_t addr = (nodes + nodeIdx);
asm("{\n\t"
"ld.volatile.v4.f32\t{%0, %1, %2, %3}, [%16];\n\t"
"ld.volatile.v4.f32\t{%4, %5, %6, %7}, [%16+16];\n\t"
"ld.volatile.v4.f32\t{%8, %9, %10, %11}, [%16+32];\n\t"
"ld.volatile.v4.u32\t{%12, %13, %14, %15}, [%16+48];\n\t"
"}"
: "=f"(node.c0xy.x), "=f"(node.c0xy.y), "=f"(node.c0xy.z), "=f"(node.c0xy.w),
"=f"(node.c1xy.x), "=f"(node.c1xy.y), "=f"(node.c1xy.z), "=f"(node.c1xy.w),
"=f"(node.c01z.x), "=f"(node.c01z.y), "=f"(node.c01z.z), "=f"(node.c01z.w),
"=r"(node.children.x), "=r"(node.children.y), "=r"(node.children.z), "=r"(node.children.w) : "r"(addr));
#elif 0 // Must be used with -Xptxas -dlcm=cg for correctness
node.c0xy = *((float4*)(nodes + nodeIdx + 0)); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = *((float4*)(nodes + nodeIdx + 16)); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = *((float4*)(nodes + nodeIdx + 32)); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
node.children = *((int4*)(nodes + nodeIdx + 48)); // (leftAddr, rightAddr, parentAddr, buildState)
#else
node.c0xy = tex1Dfetch(t_nodesA, nodeIdx/16+0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = tex1Dfetch(t_nodesA, nodeIdx/16+1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = tex1Dfetch(t_nodesA, nodeIdx/16+2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 temp = tex1Dfetch(t_nodesA, nodeIdx/16+3);
node.children.x =__float_as_int(temp.x);
node.children.y =__float_as_int(temp.y);
node.children.z =__float_as_int(temp.z);
node.children.w =__float_as_int(temp.w);
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory (nodeIdx is given as a node index)
__device__ __forceinline__ void taskFetchNode(hipDeviceptr_t nodes, int nodeIdx, CudaBVHNode &node)
{
#if 0
	// We must read the data as volatile or we can get stale data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
#elif 0
hipDeviceptr_t addr = (nodes + nodeIdx * sizeof(CudaBVHNode));
asm("{\n\t"
"ld.volatile.v4.f32\t{%0, %1, %2, %3}, [%16];\n\t"
"ld.volatile.v4.f32\t{%4, %5, %6, %7}, [%16+16];\n\t"
"ld.volatile.v4.f32\t{%8, %9, %10, %11}, [%16+32];\n\t"
"ld.volatile.v4.u32\t{%12, %13, %14, %15}, [%16+48];\n\t"
"}"
: "=f"(node.c0xy.x), "=f"(node.c0xy.y), "=f"(node.c0xy.z), "=f"(node.c0xy.w),
"=f"(node.c1xy.x), "=f"(node.c1xy.y), "=f"(node.c1xy.z), "=f"(node.c1xy.w),
"=f"(node.c01z.x), "=f"(node.c01z.y), "=f"(node.c01z.z), "=f"(node.c01z.w),
"=r"(node.children.x), "=r"(node.children.y), "=r"(node.children.z), "=r"(node.children.w) : "r"(addr));
#elif 0 // Incorrect for some volatility reason
node.c0xy = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0)); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16)); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32)); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
node.children = *((int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48)); // (leftAddr, rightAddr, parentAddr, buildState)
#else
node.c0xy = tex1Dfetch(t_nodesA, nodeIdx*4+0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = tex1Dfetch(t_nodesA, nodeIdx*4+1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = tex1Dfetch(t_nodesA, nodeIdx*4+2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 temp = tex1Dfetch(t_nodesA, nodeIdx*4+3);
node.children.x =__float_as_int(temp.x);
node.children.y =__float_as_int(temp.y);
node.children.z =__float_as_int(temp.z);
node.children.w =__float_as_int(temp.w);
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory
__device__ __forceinline__ void taskFetchNodeVolatile(hipDeviceptr_t nodes, int nodeIdx, CudaBVHNode &node)
{
	// We must read the data as volatile or we can get stale data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
}
//------------------------------------------------------------------------
// Copies node to the node array
__device__ __forceinline__ void taskSaveNodeToGMEM(CudaBVHNode* g_bvh, int tid, int nodeIdx, const volatile CudaBVHNode& node)
{
ASSERT_DIVERGENCE("taskSaveNodeToGMEM top", tid);
// Copy the data to global memory
int* nodeAddr = (int*)(&g_bvh[nodeIdx]);
if(tid < sizeof(CudaBVHNode)/sizeof(int))
nodeAddr[tid] = ((const volatile int*)&node)[tid]; // Every thread copies one word of data of its task
ASSERT_DIVERGENCE("taskSaveNodeToGMEM bottom", tid);
}
//------------------------------------------------------------------------
// Update the pointer in the parent to point to this node
__device__ void taskUpdateParentPtr(CudaBVHNode* g_bvh, int parentIdx, int taskID, int newValue)
{
// Update the parent pointers
if(parentIdx != -1) // Not for the root
{
#if 0
if(newTask->taskID == 0) // Left child
{
atomicExch(&g_bvh[parentIdx].children.x, newValue); // Inform the parent of the position of the child
//g_bvh[parentIdx].children.x = newValue;
//atomicAnd(&g_bvh[parentIdx].children.w, 0xFFFFFFFD); // Inform the parent the left child is ready
}
else
{
atomicExch(&g_bvh[parentIdx].children.y, newValue); // Inform the parent of the position of the child
//g_bvh[parentIdx].children.y = newValue;
//atomicAnd(&g_bvh[parentIdx].children.w, 0xFFFFFFFE); // Inform the parent the right child is ready
}
#else
//atomicExch(((int*)&g_bvh[parentIdx].children) + taskID , newValue);
*(((int*)&g_bvh[parentIdx].children) + taskID) = newValue;
#endif
}
}
//------------------------------------------------------------------------
// Update the pointer in the parent to point to this node
__device__ void taskUpdateParentPtr(CudaKdtreeNode* g_kdtree, int parentIdx, int taskID, int newValue)
{
// Update the parent pointers
if(parentIdx != -1) // Not for the root
{
//atomicExch(((int*)&g_bvh[parentIdx].children) + taskID , newValue);
*(((int*)&g_kdtree[parentIdx]) + taskID) = newValue;
}
}
//------------------------------------------------------------------------
// Computes plane dimension of axis aligned planes
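// Axis-aligned planes store their normal as (-1,0,0), (0,-1,0) or (0,0,-1), so -y - 2*z maps them to dimensions 0, 1 and 2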
__device__ __forceinline__ int getPlaneDimension(const float4& plane)
{
return -plane.y - plane.z*2;
}
//------------------------------------------------------------------------
// Computes distance of a point from a plane
__device__ __forceinline__ float planeDistance(const float3& normal, const float& d, const float3& p)
{
return dot(normal, p) + d;
}
//------------------------------------------------------------------------
// Creates plane from three points
__device__ __forceinline__ float4 set3PointPlane(const float3& v0, const float3& v1, const float3& v2)
{
float3 normal = normalize(cross(v0-v1, v2-v1));
float d = -dot(normal, v1);
return make_float4(normal, d);
}
//------------------------------------------------------------------------
// Computes which side of the plane a triangle is on
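// Return value: -1 = all vertices in the positive half-space, +1 = all in the negative half-space, 0 = the triangle straddles (or touches) the plane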
__device__ __forceinline__ int getPlanePosition(const float4& plane, const float3& v0, const float3& v1, const float3& v2)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int mn = 0;
int mx = 0;
float vd0, vd1, vd2; // Vertex distance
#if 1
// OPTIMIZE: Get rid of conditionals?
vd0 = planeDistance(normal, d, v0);
if(vd0 < EPS)
mn = -1;
if(vd0 > -EPS)
mx = 1;
vd1 = planeDistance(normal, d, v1);
if(vd1 < EPS)
mn = -1;
if(vd1 > -EPS)
mx = 1;
vd2 = planeDistance(normal, d, v2);
if(vd2 < EPS)
mn = -1;
if(vd2 > -EPS)
mx = 1;
#else
if(normal.x == -1.f)
{
int sgn1, sgn2;
sgn1 = signbit(v0.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.x - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.x - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.x - d - EPS);
mx = max(2*sgn2-1, mx);
}
else if(normal.y == -1.f)
{
int sgn1, sgn2;
sgn1 = signbit(v0.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.y - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.y - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.y - d - EPS);
mx = max(2*sgn2-1, mx);
}
else
{
int sgn1, sgn2;
sgn1 = signbit(v0.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.z - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.z - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.z - d - EPS);
mx = max(2*sgn2-1, mx);
}
#endif
return -(mn + mx);
}
//------------------------------------------------------------------------
__device__ __forceinline__ void getAABB(const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
tbox.m_mn.x = fminf(fminf(v0.x, v1.x), v2.x);
tbox.m_mn.y = fminf(fminf(v0.y, v1.y), v2.y);
tbox.m_mn.z = fminf(fminf(v0.z, v1.z), v2.z);
tbox.m_mx.x = fmaxf(fmaxf(v0.x, v1.x), v2.x);
tbox.m_mx.y = fmaxf(fmaxf(v0.y, v1.y), v2.y);
tbox.m_mx.z = fmaxf(fmaxf(v0.z, v1.z), v2.z);
}
//------------------------------------------------------------------------
// Computes the box and the centroid of a triangle
__device__ __forceinline__ float3 getCentroid(const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
getAABB(v0, v1, v2, tbox);
return (tbox.m_mn + tbox.m_mx)*0.5f;
}
//------------------------------------------------------------------------
// Computes which side of the plane a triangle is on, based on its centroid
__device__ __forceinline__ int getPlaneCentroidPosition(const float4& plane, const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int pos;
float3 centroid = getCentroid(v0, v1, v2, tbox);
float ctd = planeDistance(normal, d, centroid);
if(ctd < EPS)
pos = -1;
else
pos = 1;
return pos;
}
//------------------------------------------------------------------------
// Computes a triangle's position w.r.t. an axis-aligned spatial split by clipping its bounding box against the split plane
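// Return value: -1 = the clipped triangle box overlaps only the left child, +1 = only the right child, 0 = both (or, degenerately, neither)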
__device__ __forceinline__ int getPlanePositionClipped(const float4& plane, const float3& v0, const float3& v1, const float3& v2, const CudaAABB& nodeBox)
{
int dim = getPlaneDimension(plane);
float split = plane.w;
CudaAABB triBox, triBoxL, triBoxR;
getAABB(v0, v1, v2, triBox);
// Because GPUs do not support register indexing we have to switch execution based on dimension
switch(dim)
{
case 0:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.x = triBox.m_mn.x;
triBoxR.m_mx.x = triBox.m_mx.x;
triBoxL.m_mx.x = triBoxR.m_mn.x = split;
//two remaining dimensions are recomputed
{
//reordering vertices indices
const float3* _min = (v1.x <= v0.x) ? &v1 : &v0;
const float3* _max = (v1.x <= v0.x) ? &v0 : &v1;
const float3* vertMin = (v2.x < _min->x) ? &v2 : _min;
const float3* vertMax = (v2.x >= _max->x) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->x;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->x)/(iB->x-iA->x);
const float ratio_cd = (split-iA->x)/(iC->x-iA->x);
const float x0 = iA->y + ratio_ab*(iB->y-iA->y);
const float x1 = iA->y + ratio_cd*(iC->y-iA->y);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.y = fminf(xmin, iA->y);
triBoxL.m_mx.y = fmaxf(xmax, iA->y);
triBoxR.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxR.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}else{
triBoxR.m_mn.y = fminf(xmin, iA->y);
triBoxR.m_mx.y = fmaxf(xmax, iA->y);
triBoxL.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxL.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}
const float y0 = iA->z + ratio_ab*(iB->z-iA->z);
const float y1 = iA->z + ratio_cd*(iC->z-iA->z);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.z = fminf(ymin, iA->z);
triBoxL.m_mx.z = fmaxf(ymax, iA->z);
triBoxR.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxR.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}else{
triBoxR.m_mn.z = fminf(ymin, iA->z);
triBoxR.m_mx.z = fmaxf(ymax, iA->z);
triBoxL.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxL.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}
}
break;
case 1:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.y = triBox.m_mn.y;
triBoxR.m_mx.y = triBox.m_mx.y;
triBoxL.m_mx.y = triBoxR.m_mn.y = split;
//two remaining dimensions are recomputed
{
//reordering vertices indices
const float3* _min = (v1.y <= v0.y) ? &v1 : &v0;
const float3* _max = (v1.y <= v0.y) ? &v0 : &v1;
const float3* vertMin = (v2.y < _min->y) ? &v2 : _min;
const float3* vertMax = (v2.y >= _max->y) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->y;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->y)/(iB->y-iA->y);
const float ratio_cd = (split-iA->y)/(iC->y-iA->y);
const float x0 = iA->x + ratio_ab*(iB->x-iA->x);
const float x1 = iA->x + ratio_cd*(iC->x-iA->x);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.x = fminf(xmin, iA->x);
triBoxL.m_mx.x = fmaxf(xmax, iA->x);
triBoxR.m_mn.x = fminf(xmin, fminf(iB->x, iC->x));
triBoxR.m_mx.x = fmaxf(xmax, fmaxf(iB->x, iC->x));
}else{
triBoxR.m_mn.x = fminf(xmin, iA->x);
triBoxR.m_mx.x = fmaxf(xmax, iA->x);
triBoxL.m_mn.x = fminf(xmin, fminf(iB->x, iC->x));
triBoxL.m_mx.x = fmaxf(xmax, fmaxf(iB->x, iC->x));
}
const float y0 = iA->z + ratio_ab*(iB->z-iA->z);
const float y1 = iA->z + ratio_cd*(iC->z-iA->z);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.z = fminf(ymin, iA->z);
triBoxL.m_mx.z = fmaxf(ymax, iA->z);
triBoxR.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxR.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}else{
triBoxR.m_mn.z = fminf(ymin, iA->z);
triBoxR.m_mx.z = fmaxf(ymax, iA->z);
triBoxL.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxL.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}
}
break;
case 2:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.z = triBox.m_mn.z;
triBoxR.m_mx.z = triBox.m_mx.z;
triBoxL.m_mx.z = triBoxR.m_mn.z = split;
//two remaining dimensions are recomputed
{
//reordering vertices indices
const float3* _min = (v1.z <= v0.z) ? &v1 : &v0;
const float3* _max = (v1.z <= v0.z) ? &v0 : &v1;
const float3* vertMin = (v2.z < _min->z) ? &v2 : _min;
const float3* vertMax = (v2.z >= _max->z) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->z;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->z)/(iB->z-iA->z);
const float ratio_cd = (split-iA->z)/(iC->z-iA->z);
const float x0 = iA->y + ratio_ab*(iB->y-iA->y);
const float x1 = iA->y + ratio_cd*(iC->y-iA->y);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.y = fminf(xmin, iA->y);
triBoxL.m_mx.y = fmaxf(xmax, iA->y);
triBoxR.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxR.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}else{
triBoxR.m_mn.y = fminf(xmin, iA->y);
triBoxR.m_mx.y = fmaxf(xmax, iA->y);
triBoxL.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxL.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}
const float y0 = iA->x + ratio_ab*(iB->x-iA->x);
const float y1 = iA->x + ratio_cd*(iC->x-iA->x);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.x = fminf(ymin, iA->x);
triBoxL.m_mx.x = fmaxf(ymax, iA->x);
triBoxR.m_mn.x = fminf(ymin, fminf(iB->x, iC->x));
triBoxR.m_mx.x = fmaxf(ymax, fmaxf(iB->x, iC->x));
}else{
triBoxR.m_mn.x = fminf(ymin, iA->x);
triBoxR.m_mx.x = fmaxf(ymax, iA->x);
triBoxL.m_mn.x = fminf(ymin, fminf(iB->x, iC->x));
triBoxL.m_mx.x = fmaxf(ymax, fmaxf(iB->x, iC->x));
}
}
break;
}
float3 intersectMn = fmaxf(triBoxL.m_mn, nodeBox.m_mn);
float3 intersectMx = fminf(triBoxL.m_mx, nodeBox.m_mx);
bool leftIsect = (intersectMn.x <= intersectMx.x) && (intersectMn.y <= intersectMx.y) && (intersectMn.z <= intersectMx.z);
intersectMn = fmaxf(triBoxR.m_mn, nodeBox.m_mn);
intersectMx = fminf(triBoxR.m_mx, nodeBox.m_mx);
bool rightIsect = (intersectMn.x <= intersectMx.x) && (intersectMn.y <= intersectMx.y) && (intersectMn.z <= intersectMx.z);
return -1*leftIsect + 1*rightIsect;
}
inline __host__ __device__ double3 operator+(double3 a, double3 b)
{
return make_double3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ double3 operator-(double3 a, double3 b)
{
return make_double3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __host__ __device__ double3 operator*(double3 a, float b)
{
return make_double3(a.x * b, a.y * b, a.z * b);
}
//------------------------------------------------------------------------
__device__ __forceinline__ void boxCenterHalfSize(const CudaAABB& nodeBox, float3& center, float3& halfSize)
{
center = (nodeBox.m_mn + nodeBox.m_mx)*0.5f;
halfSize = (nodeBox.m_mx - nodeBox.m_mn)*0.5f/* + 2000*EPS*/;
/*double3 cD, hD;
cD = (make_double3(nodeBox.m_mn.x, nodeBox.m_mn.y, nodeBox.m_mn.z) + make_double3(nodeBox.m_mx.x, nodeBox.m_mx.y, nodeBox.m_mx.z))*0.5;
hD = (make_double3(nodeBox.m_mx.x, nodeBox.m_mx.y, nodeBox.m_mx.z) - make_double3(nodeBox.m_mn.x, nodeBox.m_mn.y, nodeBox.m_mn.z))*0.5;
center = make_float3(cD.x, cD.y, cD.z);
halfSize = make_float3(hD.x, hD.y, hD.z);*/
}
//------------------------------------------------------------------------
// Compute triangle's position wrt splitting plane by computing its intersection with children bounding boxes
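// Return value: -1 = only the left child is overlapped (also the fallback when imprecision reports no overlap), +1 = only the right child, 0 = both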
__device__ __forceinline__ int getTriChildOverlap(const float4& plane, const float3& v0, const float3& v1, const float3& v2, const CudaAABB& nodeBox)
{
int dim = getPlaneDimension(plane);
float split = plane.w;
CudaAABB nodeBoxL, nodeBoxR;
nodeBoxL = nodeBoxR = nodeBox;
// Because GPUs do not support register indexing we have to switch execution based on dimension
switch(dim)
{
case 0:
nodeBoxL.m_mx.x = nodeBoxR.m_mn.x = split;
break;
case 1:
nodeBoxL.m_mx.y = nodeBoxR.m_mn.y = split;
break;
case 2:
nodeBoxL.m_mx.z = nodeBoxR.m_mn.z = split;
break;
}
float3 boxCenterL, boxHalfSizeL;
boxCenterHalfSize(nodeBoxL, boxCenterL, boxHalfSizeL);
int leftIsect = triBoxOverlap(boxCenterL, boxHalfSizeL, v0, v1, v2, nodeBoxL.m_mn, nodeBoxL.m_mx);
float3 boxCenterR, boxHalfSizeR;
boxCenterHalfSize(nodeBoxR, boxCenterR, boxHalfSizeR);
int rightIsect = triBoxOverlap(boxCenterR, boxHalfSizeR, v0, v1, v2, nodeBoxR.m_mn, nodeBoxR.m_mx);
if(leftIsect == 0 && rightIsect == 0) // Should not happen, but happens due to numerical imprecision
{
//printf("Cannot happen!\n");
return -1;
}
return -1*leftIsect + 1*rightIsect;
}
//------------------------------------------------------------------------
// Computes which side of the plane a ray is on
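// orderCounter is set to -1 when the segment crosses the plane front-to-back, +1 when back-to-front, 0 otherwise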
__device__ __forceinline__ int getPlanePosition(const float4& plane, const float3& orig, const float3& dir, const float& tmin, const float& tmax, int& orderCounter)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int retVal;
#if 0
int min = 0;
int max = 0;
float d1 = planeDistance(normal, d, orig + tmin*dir);
float d2 = planeDistance(normal, d, orig + tmax*dir);
// OPTIMIZE: Get rid of conditionals?
if (d1 < EPS)
min = -1;
if (d1 > -EPS)
max = 1;
if (d2 < EPS)
min = -1;
if (d2 > -EPS)
max = 1;
retVal = min + max;
#else
float dv = dot(dir, normal);
orderCounter = 0;
#define COPLANAR_EPS 1e-30f
if(dv < -COPLANAR_EPS)
{
// the ray will hit from the front side
float t = -planeDistance(normal, d, orig) / dv;
if (t > tmax + EPS)
retVal = 1;
else if (t < tmin - EPS)
retVal = -1;
else
{
// hits the plane from front to back
orderCounter = -1;
retVal = 0;
}
}
else if(dv > COPLANAR_EPS)
{
	// the ray will hit from the back side
float t = -planeDistance(normal, d, orig) / dv;
if (t > tmax + EPS)
retVal = -1;
else if (t < tmin - EPS)
retVal = 1;
else
{
// hits the plane from back to front
orderCounter = 1;
retVal = 0;
}
}
else
{
int min = 0;
int max = 0;
float d1 = planeDistance(normal, d, orig + tmin*dir);
float d2 = planeDistance(normal, d, orig + tmax*dir);
// OPTIMIZE: Get rid of conditionals?
if (d1 < EPS)
min = -1;
if (d1 > -EPS)
max = 1;
if (d2 < EPS)
min = -1;
if (d2 > -EPS)
max = 1;
retVal = min + max;
}
#endif
return retVal;
}
//------------------------------------------------------------------------
// Computes the number of samples for the cost function
__device__ __host__ __forceinline__ int getNumberOfSamples(const int& number)
{
return (int)sqrtf(number);
}
//------------------------------------------------------------------------
// Computes area of the bounding box
__device__ __forceinline__ float areaAABB(const volatile CudaAABB& bbox)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
return (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by x
__device__ __forceinline__ void areaAABBX(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = pos - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.x = bbox.m_mx.x - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by y
__device__ __forceinline__ void areaAABBY(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = pos - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.y = bbox.m_mx.y - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by z
__device__ __forceinline__ void areaAABBZ(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = pos - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.z = bbox.m_mx.z - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Choose axis based on Havran's longest-axis + round-robin mixture
__device__ __forceinline__ int taskAxis(volatile float4& plane, const volatile CudaAABB& bbox, volatile int &sharedInt, int axis)
{
volatile float* tPln = ((volatile float*)&plane)+threadIdx.x;
volatile float* tMin = ((volatile float*)&bbox.m_mn)+threadIdx.x;
volatile float* tMax = ((volatile float*)&bbox.m_mx)+threadIdx.x;
// Compute longest axis
if(threadIdx.x < 3)
{
*tPln = *tMax - *tMin;
float dMax = max3(plane.x, plane.y, plane.z);
if(__ffs(__ballot(dMax == *tPln)) == threadIdx.x+1) // First thread with such condition
{
sharedInt = threadIdx.x; // Longest axis
}
}
int warpIdx = blockDim.y*blockIdx.x + threadIdx.y; // Warp ID
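	// One warp in four ((warpIdx & 0x3) == 0) uses the longest axis, the remaining warps keep the round-robin axis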
return ((warpIdx & 0x3) != 0) ? axis : sharedInt;
}
//------------------------------------------------------------------------
// Splits the node at the spatial median of its bounding box along the selected axis (longest-axis or round-robin variant)
__device__ void splitMedian(int tid, int axis, volatile float4& plane, const volatile CudaAABB& bbox)
{
ASSERT_DIVERGENCE("splitMedian", tid);
volatile float* tPln = ((volatile float*)&plane)+tid;
volatile float* tMin = ((volatile float*)&bbox.m_mn)+tid;
volatile float* tMax = ((volatile float*)&bbox.m_mx)+tid;
#if 0 // Longest axis
// Compute spatial median
if(tid < 3)
{
#if 1
*tPln = *tMax - *tMin;
float dMax = max3(plane.x, plane.y, plane.z);
if(__ffs(__ballot(dMax == *tPln)) == tid+1) // First thread with such condition
{
plane.w = -(*tMin + *tMax) / 2.0f;
*tPln = 1;
}
else
{
*tPln = 0;
}
#else
if(tid == 0) // Single thread median split
{
if(dMax == plane[threadIdx.y].x)
{
plane[threadIdx.y].x = 1;
plane[threadIdx.y].w = -(bbox.m_mn.x + bbox.m_mx.x) / 2.0f;
}
else
plane[threadIdx.y].x = 0;
if(dMax == plane[threadIdx.y].y)
{
plane[threadIdx.y].y = 1;
plane[threadIdx.y].w = -(bbox.m_mn.y + bbox.m_mx.y) / 2.0f;
}
else
plane[threadIdx.y].y = 0;
if(dMax == plane[threadIdx.y].z)
{
plane[threadIdx.y].z = 1;
plane[threadIdx.y].w = -(bbox.m_mn.z + bbox.m_mx.z) / 2.0f;
}
else
plane[threadIdx.y].z = 0;
}
#endif
}
#else // Round robin
//int axis = depth % 3;
if(tid < 3)
{
*tPln = *tMax - *tMin;
if(tid == axis)
{
plane.w = -(*tMin + *tMax) / 2.0f;
*tPln = 1;
}
else
{
*tPln = 0;
}
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on AABB
__device__ void findPlaneAABB(int planePos, const volatile CudaAABB& bbox, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneAABB", threadIdx.x);
#if 1 // Equal number of planes in each dimension
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
float rpos = (float)(1 + (planePos % planesPerAxis))/(float)(planesPerAxis+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
#else
float lX = bbox.m_mx.x - bbox.m_mn.x;
float lY = bbox.m_mx.y - bbox.m_mn.y;
float lZ = bbox.m_mx.z - bbox.m_mn.z;
float sumLengths = lX + lY + lZ;
// Assign the planes to different methods
int numX = lX/sumLengths*PLANE_COUNT+0.5f;
int numY = lY/sumLengths*PLANE_COUNT+0.5f;
int numZ = lZ/sumLengths*PLANE_COUNT+0.5f;
//int axis = (planePos < numX) ? 0 : (planePos < numX+numY) ? 1 : 2;
int axis = (planePos >= numX) + (planePos >= numX+numY);
if(axis == 0)
{
float rpos = (float)(planePos+1) / (float)(numX+1);
float pos = bbox.m_mn.x + lX * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float rpos = (float)(planePos-numX+1) / (float)(numY+1);
float pos = bbox.m_mn.y + lY * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float rpos = (float)(planePos-numX-numY+1) / (float)(numZ+1);
float pos = bbox.m_mn.z + lZ * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlaneAABB(int planePos, const volatile CudaAABB& bbox, float& areaLeft, float& areaRight, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneAABB", threadIdx.x);
#if 1 // Equal number of planes in each dimension
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
float rpos = (float)( 1 + (planePos % planesPerAxis))/(float)(planesPerAxis+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
#else
float lX = bbox.m_mx.x - bbox.m_mn.x;
float lY = bbox.m_mx.y - bbox.m_mn.y;
float lZ = bbox.m_mx.z - bbox.m_mn.z;
float sumLengths = lX + lY + lZ;
// Assign the planes to different methods
int numX = lX/sumLengths*PLANE_COUNT+0.5f;
int numY = lY/sumLengths*PLANE_COUNT+0.5f;
int numZ = lZ/sumLengths*PLANE_COUNT+0.5f;
//int axis = (planePos < numX) ? 0 : (planePos < numX+numY) ? 1 : 2;
int axis = (planePos >= numX) + (planePos >= numX+numY);
if(axis == 0)
{
float rpos = (float)(planePos+1) / (float)(numX+1);
float pos = bbox.m_mn.x + lX * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float rpos = (float)(planePos-numX+1) / (float)(numY+1);
float pos = bbox.m_mn.y + lY * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float rpos = (float)(planePos-numX-numY+1) / (float)(numZ+1);
float pos = bbox.m_mn.z + lZ * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlaneTriAABB(int planePos, float4* tris, int* trisIndex, int triStart, const volatile CudaAABB& bbox, float& areaLeft, float& areaRight, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneTriAABB", threadIdx.x);
int tri = planePos / 6;
int axis = (planePos % 6) / 2;
int lim = (planePos % 6) - axis;
int triidx = trisIndex[triStart + tri]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri((hipDeviceptr_t)tris, triidx, v0, v1, v2);
// Get bounding box
CudaAABB tbox;
	getAABB(v0, v1, v2, tbox);
if(axis == 0)
{
float pos;
if(lim == 0)
pos = tbox.m_mn.x;
else
pos = tbox.m_mx.x;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float pos;
if(lim == 0)
pos = tbox.m_mn.y;
else
pos = tbox.m_mx.y;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float pos;
if(lim == 0)
pos = tbox.m_mn.z;
else
pos = tbox.m_mx.z;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on triangle division
__device__ void findPlaneTriAA(int planePos, hipDeviceptr_t tris, hipDeviceptr_t trisIndex, int triStart, int triEnd, float4& plane, int numAxisAlignedPlanes)
{
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
int triNum = triEnd - triStart;
/*unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int triidx = ((int*)trisIndex)[triStart + (hashC % triNum)]*3;*/
float tpos = (float)(planePos % planesPerAxis)/(float)(planesPerAxis-1);
int triidx = ((int*)trisIndex)[triStart + (int)(tpos * (triNum-1))]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri(tris, triidx, v0, v1, v2);
// Compute triangle centroid
CudaAABB tbox;
float3 cent = getCentroid(v0, v1, v2, tbox);
	// Compute an axis-aligned plane through its centroid
if(axis == 0)
{
plane = make_float4(-1.f, 0.f, 0.f, cent.x);
}
else if(axis == 1)
{
plane = make_float4(0.f, -1.f, 0.f, cent.y);
}
else
{
plane = make_float4(0.f, 0.f, -1.f, cent.z);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on triangle division
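// Picks a pseudo-random triangle (Jenkins hash of planePos) and uses its supporting plane as the split candidate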
__device__ void findPlaneTri(int planePos, hipDeviceptr_t tris, hipDeviceptr_t trisIndex, int triStart, int triEnd, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneTri", threadIdx.x);
int triNum = triEnd - triStart;
unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int triidx = ((int*)trisIndex)[triStart + (hashC % triNum)]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri(tris, triidx, v0, v1, v2);
plane = set3PointPlane(v0, v1, v2);
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on ray division
__device__ void findPlaneRay(int planePos, hipDeviceptr_t rays, hipDeviceptr_t raysIndex, int rayStart, int rayEnd, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneRay", threadIdx.x);
// BUG: Fails because of unclipped rays
// Good strategy - only for primary rays
// partitioning using an edge of random triangle and camera origin
// RAY1 min / RAY1 max / RAY2 min
int rayNum = rayEnd - rayStart;
unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int raypos1 = rayStart + (hashC % rayNum);
int rayidx1 = ((int*)raysIndex)[raypos1];
float3 orig, dir;
float tmin, tmax;
taskFetchRay(rays, rayidx1, orig, dir, tmin, tmax);
float3 v0 = orig + tmin*dir;
float3 v1 = orig + tmax*dir;
int raypos2 = raypos1+1;
if(raypos2 >= rayEnd)
raypos2 = rayStart;
int rayidx2 = ((int*)raysIndex)[raypos2];
taskFetchRay(rays, rayidx2, orig, dir, tmin, tmax);
float3 v2 = orig + tmax*dir;
if(hashA & 0x1)
v2 = v1 + cross(v1-v0, v2-v1);
plane = set3PointPlane(v0, v1, v2);
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlane(int planePos, hipDeviceptr_t rays, hipDeviceptr_t raysIndex, int rayStart, int rayEnd, hipDeviceptr_t tris, hipDeviceptr_t trisIndex, int triStart, int triEnd, const volatile CudaAABB& bbox, int numAxisAlignedPlanes, int numTriangleBasedPlanes, float4& plane)
{
ASSERT_DIVERGENCE("findPlane", threadIdx.x);
if(planePos < numAxisAlignedPlanes) // Choose axis aligned plane
{
findPlaneAABB(planePos, bbox, plane, numAxisAlignedPlanes);
}
else if(planePos < numAxisAlignedPlanes + numTriangleBasedPlanes) // Choose triangle based plane
{
findPlaneTri(planePos, tris, trisIndex, triStart, triEnd, plane);
}
else // Choose ray based plane
{
findPlaneRay(planePos, rays, raysIndex, rayStart, rayEnd, plane);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread in the chosen axis
__device__ void findPlaneRobin(int planePos, const volatile CudaAABB& bbox, int axis, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneRobin", threadIdx.x);
float rpos = (float)(planePos+1) / (float)(WARP_SIZE+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
}
//------------------------------------------------------------------------
// Computes Woop triangle from a regular one
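// Builds the affine transformation of Woop's unit-triangle intersection test: the inverse of the matrix
// [v0-v2, v1-v2, (v0-v2)x(v1-v2)] is packed into o0..o2, with the translation terms in the .w components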
__device__ void calcWoop(float3& v0, float3& v1, float3& v2, float4& o0, float4& o1, float4& o2)
{
// Compute woop
float3 c0 = v0 - v2;
float3 c1 = v1 - v2;
float3 c2 = cross(c0,c1);
	// NOTE: division by 0 occurs here for degenerate (zero-area) triangles
float det = 1.0/(c0.x*(c2.z*c1.y-c1.z*c2.y) - c0.y*(c2.z*c1.x-c1.z*c2.x) + c0.z*(c2.y*c1.x-c1.y*c2.x));
float3 i0,i1,i2;
//i0 =
i0.x = (c2.z*c1.y-c1.z*c2.y)*det;
i0.y = -(c2.z*c1.x-c1.z*c2.x)*det;
i0.z = (c2.y*c1.x-c1.y*c2.x)*det;
//i1 =
i1.x = -(c2.z*c0.y-c0.z*c2.y)*det;
i1.y = (c2.z*c0.x-c0.z*c2.x)*det;
i1.z = -(c2.y*c0.x-c0.y*c2.x)*det;
//i2 =
i2.x = (c1.z*c0.y-c0.z*c1.y)*det;
i2.y = -(c1.z*c0.x-c0.z*c1.x)*det;
i2.z = (c1.y*c0.x-c0.y*c1.x)*det;
// Final values
o0.x = i2.x;
o0.y = i2.y;
o0.z = i2.z;
o0.w = -dot(-i2,v2);
o1.x = i0.x;
o1.y = i0.y;
o1.z = i0.z;
o1.w = dot(-i0,v2);
o2.x = i1.x;
o2.y = i1.y;
o2.z = i1.z;
o2.w = dot(-i1,v2);
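	// Flush a negative zero to +0.0f so that o0.x cannot alias the leaf sentinel (__int_as_float(0x80000000) == -0.0f) written by the leaf builders below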
if (o0.x == 0.0f)
o0.x = 0.0f;
}
//------------------------------------------------------------------------
// Creates a leaf in the compact layout
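// Each triangle is written as three float4s, terminated by a sentinel vertex with x == -0.0f; returns ~outOfs so that negative child pointers mark leaves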
__device__ int createLeaf(int tid, int outOfs, float* outTriMem, int* outIdxMem, int start, int end, float* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
float4 triData;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = ((float4*)outTriMem) + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris*3+1); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int pos = i*WARP_SIZE + tid;
int tri = pos/3;
int item = pos % 3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
/*float4* inTri = ((float4*)inTriMem) + idxData*3; // Memory for the first triangle data
//triData.x = inTri[item].x;
//triData.y = inTri[item].y;
//triData.z = inTri[item].z;
//triData.w = inTri[item].w;
triData = inTri[item];*/
triData = tex1Dfetch(t_trisA, idxData*3 + item);
}
else // Sentinel
{
idxData = 0;
triData = make_float4(__int_as_float(0x80000000));
}
// Write out the data
if(tri < numTris || (tri == numTris && item == 0))
{
outTri[pos] = triData;
outIdx[pos] = idxData;
}
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf in the compact layout, with Woop triangles
__device__ int createLeafWoop(int tid, int outOfs, float4* outTriMem, int* outIdxMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = outTriMem + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((hipDeviceptr_t)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[pos] = idxData;
}
}
if(tid == 0)
{
outTri[numTris*3].x = __int_as_float(0x80000000);
outIdx[numTris*3] = 0;
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf in the compact layout, with references to triangles
__device__ int createLeafReference(int tid, int outOfs, int* outIdxMem, int start, int end, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
outIdx[tri] = idxData;
}
}
if(tid == 0)
{
outIdx[numTris] = 0x80000000;
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf for a Kdtree, with Woop triangles
__device__ int createKdtreeLeafWoop(int tid, int outOfs, float4* outTriMem, int* outIdxMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = outTriMem + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((hipDeviceptr_t)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[pos] = idxData;
}
}
return numTris | KDTREE_LEAF;
}
//------------------------------------------------------------------------
// Creates a leaf for a Kdtree, with Woop triangles and the triangle indices stored interleaved after them
__device__ int createKdtreeInterleavedLeafWoop(int tid, int outOfs, char* outTriMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = (float4*)(outTriMem + outOfs); // Memory for the first triangle data
int* outIdx = (int*)(outTriMem + outOfs + numTris*3*sizeof(float4)); // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((hipDeviceptr_t)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[tri] = idxData;
}
}
return numTris | KDTREE_LEAF;
}
//------------------------------------------------------------------------
// Kernel converting regular triangles to Woop triangles
extern "C" __global__ void createWoop(hipDeviceptr_t tri, hipDeviceptr_t woop, int numTris)
{
// Compute output data pointers
int idx = blockDim.x * blockIdx.x + threadIdx.x; // 1D index
if(idx < numTris)
{
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri(tri, idx*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
float4* woopData = (float4*)woop;
woopData[idx*3+0] = o0;
woopData[idx*3+1] = o1;
woopData[idx*3+2] = o2;
}
}
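//------------------------------------------------------------------------
// A minimal host-side launch sketch for the kernel above. This wrapper is illustrative only:
// the block size is an assumption and the function is not referenced by the original framework.
extern "C" void launchCreateWoop(hipDeviceptr_t tri, hipDeviceptr_t woop, int numTris)
{
	int block = 256;                            // illustrative block size
	int grid  = (numTris + block - 1) / block;  // one thread per triangle
	hipLaunchKernelGGL(createWoop, dim3(grid), dim3(block), 0, 0, tri, woop, numTris);
}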
//------------------------------------------------------------------------
// Returns true if the node is a leaf
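// With COMPACT_LAYOUT and WOOP_TRIANGLES leaves are encoded as negative values (the ~offset returned by the leaf builders); otherwise the KDTREE_LEAF bit marks a leaf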
__device__ bool isKdLeaf(int flag)
{
#if defined(COMPACT_LAYOUT) && defined(WOOP_TRIANGLES)
return flag < 0;
#else
return flag & KDTREE_LEAF;
#endif
}
//------------------------------------------------------------------------ | rt_common.cu | /*
* Copyright 2009-2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Common functionality for ray tracing based framework specializations.
"Massively Parallel Hierarchical Scene Sorting with Applications in Rendering",
Marek Vinkler, Michal Hapala, Jiri Bittner and Vlastimil Havran,
Computer Graphics Forum 2012
*/
#pragma once
#include "rt_common.cuh"
#include "tri_box_overlap.cuh"
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE
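// SCAN_TYPE selects the partitioning method: judging by the task names, 0 = naive prefix scan, 1 = Harris-style up/down sweep, 2/3 = sort-based partitioning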
__device__ __forceinline__ TaskType taskChooseScanType(int unfinished)
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS1;
#elif SCAN_TYPE == 1
if(unfinished < 8) // Value of 8 corresponds to 256 items where there is a crossover between naive and Harris
return TaskType_Sort_PPS1;
else
return TaskType_Sort_PPS1_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT1;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE for PPS1
__device__ __forceinline__ TaskType taskChooseScanType1()
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS1;
#elif SCAN_TYPE == 1
return TaskType_Sort_PPS1_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT1;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the AABB_TYPE
__device__ __forceinline__ TaskType taskChooseAABBType()
{
#if AABB_TYPE < 3
return TaskType_AABB_Min;
#elif AABB_TYPE == 3
return TaskType_AABB;
#endif
}
//------------------------------------------------------------------------
// Returns the right TaskType based on the SCAN_TYPE for PPS2
__device__ __forceinline__ TaskType taskChooseScanType2()
{
#if SCAN_TYPE == 0
return TaskType_Sort_PPS2;
#elif SCAN_TYPE == 1
return TaskType_Sort_PPS2_Up;
#elif SCAN_TYPE == 2 || SCAN_TYPE == 3
return TaskType_Sort_SORT2;
#else
#error Unknown SCAN_TYPE!
#endif
}
//------------------------------------------------------------------------
// Fetches ray from global memory
__device__ __forceinline__ void taskFetchRay(CUdeviceptr rays, int rayIdx, float3 &orig, float3 &dir, float &tmin, float &tmax)
{
float4 o = *((float4*)(rays + rayIdx * 32 + 0));
float4 d = *((float4*)(rays + rayIdx * 32 + 16));
orig = make_float3(o);
tmin = o.w;
dir = make_float3(d);
tmax = d.w;
}
//------------------------------------------------------------------------
// Fetches ray from global memory
__device__ __forceinline__ void taskFetchRayVolatile(CUdeviceptr rays, int rayIdx, float3 &orig, float3 &dir, float &tmin, float &tmax)
{
	// We must read the data as volatile or we can get stale data
volatile float4 *po = (volatile float4*)(rays + rayIdx * 32 + 0);
volatile float4 *pd = (volatile float4*)(rays + rayIdx * 32 + 16);
orig.x = po->x, orig.y = po->y, orig.z = po->z;
dir.x = pd->x, dir.y = pd->y, dir.z = pd->z;
tmin = po->w;
tmax = pd->w;
}
//------------------------------------------------------------------------
// Fetches triangle from global memory
__device__ __forceinline__ void taskFetchTri(CUdeviceptr tris, int triIdx, float3 &v0, float3 &v1, float3 &v2)
{
#if 1
v0 = make_float3(tex1Dfetch(t_trisA, triIdx + 0));
v1 = make_float3(tex1Dfetch(t_trisA, triIdx + 1));
v2 = make_float3(tex1Dfetch(t_trisA, triIdx + 2));
#elif 0
v0 = make_float3(((float4*)tris)[triIdx + 0]);
v1 = make_float3(((float4*)tris)[triIdx + 1]);
v2 = make_float3(((float4*)tris)[triIdx + 2]);
#else
v0 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 0]));
v1 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 1]));
v2 = make_float3(*(float4*)&(((volatile float4*)tris)[triIdx + 2]));
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory (nodeIdx is given as a byte offset)
__device__ __forceinline__ void taskFetchNodeAddr(CUdeviceptr nodes, int nodeIdx, CudaBVHNode &node)
{
#if 0
	// We must read the data as volatile or we can get stale data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
#elif 0
CUdeviceptr addr = (nodes + nodeIdx);
asm("{\n\t"
"ld.volatile.v4.f32\t{%0, %1, %2, %3}, [%16];\n\t"
"ld.volatile.v4.f32\t{%4, %5, %6, %7}, [%16+16];\n\t"
"ld.volatile.v4.f32\t{%8, %9, %10, %11}, [%16+32];\n\t"
"ld.volatile.v4.u32\t{%12, %13, %14, %15}, [%16+48];\n\t"
"}"
: "=f"(node.c0xy.x), "=f"(node.c0xy.y), "=f"(node.c0xy.z), "=f"(node.c0xy.w),
"=f"(node.c1xy.x), "=f"(node.c1xy.y), "=f"(node.c1xy.z), "=f"(node.c1xy.w),
"=f"(node.c01z.x), "=f"(node.c01z.y), "=f"(node.c01z.z), "=f"(node.c01z.w),
"=r"(node.children.x), "=r"(node.children.y), "=r"(node.children.z), "=r"(node.children.w) : "r"(addr));
#elif 0 // Must be used with -Xptxas -dlcm=cg for correctness
node.c0xy = *((float4*)(nodes + nodeIdx + 0)); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = *((float4*)(nodes + nodeIdx + 16)); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = *((float4*)(nodes + nodeIdx + 32)); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
node.children = *((int4*)(nodes + nodeIdx + 48)); // (leftAddr, rightAddr, parentAddr, buildState)
#else
node.c0xy = tex1Dfetch(t_nodesA, nodeIdx/16+0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = tex1Dfetch(t_nodesA, nodeIdx/16+1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = tex1Dfetch(t_nodesA, nodeIdx/16+2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 temp = tex1Dfetch(t_nodesA, nodeIdx/16+3);
node.children.x =__float_as_int(temp.x);
node.children.y =__float_as_int(temp.y);
node.children.z =__float_as_int(temp.z);
node.children.w =__float_as_int(temp.w);
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory (nodeIdx is given as a node index)
__device__ __forceinline__ void taskFetchNode(CUdeviceptr nodes, int nodeIdx, CudaBVHNode &node)
{
#if 0
	// We must read the data as volatile or we can get stale data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
#elif 0
CUdeviceptr addr = (nodes + nodeIdx * sizeof(CudaBVHNode));
asm("{\n\t"
"ld.volatile.v4.f32\t{%0, %1, %2, %3}, [%16];\n\t"
"ld.volatile.v4.f32\t{%4, %5, %6, %7}, [%16+16];\n\t"
"ld.volatile.v4.f32\t{%8, %9, %10, %11}, [%16+32];\n\t"
"ld.volatile.v4.u32\t{%12, %13, %14, %15}, [%16+48];\n\t"
"}"
: "=f"(node.c0xy.x), "=f"(node.c0xy.y), "=f"(node.c0xy.z), "=f"(node.c0xy.w),
"=f"(node.c1xy.x), "=f"(node.c1xy.y), "=f"(node.c1xy.z), "=f"(node.c1xy.w),
"=f"(node.c01z.x), "=f"(node.c01z.y), "=f"(node.c01z.z), "=f"(node.c01z.w),
"=r"(node.children.x), "=r"(node.children.y), "=r"(node.children.z), "=r"(node.children.w) : "r"(addr));
#elif 0 // Incorrect for some volatility reason
node.c0xy = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0)); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16)); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = *((float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32)); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
node.children = *((int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48)); // (leftAddr, rightAddr, parentAddr, buildState)
#else
node.c0xy = tex1Dfetch(t_nodesA, nodeIdx*4+0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
node.c1xy = tex1Dfetch(t_nodesA, nodeIdx*4+1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
node.c01z = tex1Dfetch(t_nodesA, nodeIdx*4+2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 temp = tex1Dfetch(t_nodesA, nodeIdx*4+3);
node.children.x =__float_as_int(temp.x);
node.children.y =__float_as_int(temp.y);
node.children.z =__float_as_int(temp.z);
node.children.w =__float_as_int(temp.w);
#endif
}
//------------------------------------------------------------------------
// Fetches node from global memory
__device__ __forceinline__ void taskFetchNodeVolatile(CUdeviceptr nodes, int nodeIdx, CudaBVHNode &node)
{
	// We must read the data as volatile or we can get stale data
volatile float4 *vc0xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
volatile float4 *vc1xy = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 16); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
volatile float4 *vc01z = (volatile float4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 32); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
volatile int4 *vchildren = (volatile int4*)(nodes + nodeIdx * sizeof(CudaBVHNode) + 48); // (leftAddr, rightAddr, parentAddr, buildState)
node.c0xy.x = vc0xy->x, node.c0xy.y = vc0xy->y, node.c0xy.z = vc0xy->z, node.c0xy.w = vc0xy->w;
node.c1xy.x = vc1xy->x, node.c1xy.y = vc1xy->y, node.c1xy.z = vc1xy->z, node.c1xy.w = vc1xy->w;
node.c01z.x = vc01z->x, node.c01z.y = vc01z->y, node.c01z.z = vc01z->z, node.c01z.w = vc01z->w;
node.children.x = vchildren->x, node.children.y = vchildren->y, node.children.z = vchildren->z, node.children.w = vchildren->w;
}
//------------------------------------------------------------------------
// Copies node to the node array
__device__ __forceinline__ void taskSaveNodeToGMEM(CudaBVHNode* g_bvh, int tid, int nodeIdx, const volatile CudaBVHNode& node)
{
ASSERT_DIVERGENCE("taskSaveNodeToGMEM top", tid);
// Copy the data to global memory
int* nodeAddr = (int*)(&g_bvh[nodeIdx]);
if(tid < sizeof(CudaBVHNode)/sizeof(int))
nodeAddr[tid] = ((const volatile int*)&node)[tid]; // Every thread copies one word of data of its task
ASSERT_DIVERGENCE("taskSaveNodeToGMEM bottom", tid);
}
//------------------------------------------------------------------------
// Update the pointer in the parent to point to this node
__device__ void taskUpdateParentPtr(CudaBVHNode* g_bvh, int parentIdx, int taskID, int newValue)
{
// Update the parent pointers
if(parentIdx != -1) // Not for the root
{
#if 0
if(newTask->taskID == 0) // Left child
{
atomicExch(&g_bvh[parentIdx].children.x, newValue); // Inform the parent of the position of the child
//g_bvh[parentIdx].children.x = newValue;
//atomicAnd(&g_bvh[parentIdx].children.w, 0xFFFFFFFD); // Inform the parent the left child is ready
}
else
{
atomicExch(&g_bvh[parentIdx].children.y, newValue); // Inform the parent of the position of the child
//g_bvh[parentIdx].children.y = newValue;
//atomicAnd(&g_bvh[parentIdx].children.w, 0xFFFFFFFE); // Inform the parent the right child is ready
}
#else
//atomicExch(((int*)&g_bvh[parentIdx].children) + taskID , newValue);
*(((int*)&g_bvh[parentIdx].children) + taskID) = newValue;
#endif
}
}
//------------------------------------------------------------------------
// Update the pointer in the parent to point to this node
__device__ void taskUpdateParentPtr(CudaKdtreeNode* g_kdtree, int parentIdx, int taskID, int newValue)
{
// Update the parent pointers
if(parentIdx != -1) // Not for the root
{
//atomicExch(((int*)&g_bvh[parentIdx].children) + taskID , newValue);
*(((int*)&g_kdtree[parentIdx]) + taskID) = newValue;
}
}
//------------------------------------------------------------------------
// Computes plane dimension of axis aligned planes
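// Axis-aligned planes are stored with normal (-1,0,0), (0,-1,0) or (0,0,-1) (see findPlaneAABB), so -y - 2z maps the normal to dimension 0, 1 or 2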
__device__ __forceinline__ int getPlaneDimension(const float4& plane)
{
return -plane.y - plane.z*2;
}
//------------------------------------------------------------------------
// Computes distance of a point from a plane
__device__ __forceinline__ float planeDistance(const float3& normal, const float& d, const float3& p)
{
return dot(normal, p) + d;
}
//------------------------------------------------------------------------
// Creates plane from three points
__device__ __forceinline__ float4 set3PointPlane(const float3& v0, const float3& v1, const float3& v2)
{
float3 normal = normalize(cross(v0-v1, v2-v1));
float d = -dot(normal, v1);
return make_float4(normal, d);
}
//------------------------------------------------------------------------
// Computes which side of the plane is a triangle on
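// Returns -1 when all vertices lie on the positive side of the plane, 1 when all lie on the negative side, and 0 when the triangle straddles or touches the plane (EPS tolerance)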
__device__ __forceinline__ int getPlanePosition(const float4& plane, const float3& v0, const float3& v1, const float3& v2)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int mn = 0;
int mx = 0;
float vd0, vd1, vd2; // Vertex distance
#if 1
// OPTIMIZE: Get rid of conditionals?
vd0 = planeDistance(normal, d, v0);
if(vd0 < EPS)
mn = -1;
if(vd0 > -EPS)
mx = 1;
vd1 = planeDistance(normal, d, v1);
if(vd1 < EPS)
mn = -1;
if(vd1 > -EPS)
mx = 1;
vd2 = planeDistance(normal, d, v2);
if(vd2 < EPS)
mn = -1;
if(vd2 > -EPS)
mx = 1;
#else
if(normal.x == -1.f)
{
int sgn1, sgn2;
sgn1 = signbit(v0.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.x - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.x - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.x - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.x - d - EPS);
mx = max(2*sgn2-1, mx);
}
else if(normal.y == -1.f)
{
int sgn1, sgn2;
sgn1 = signbit(v0.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.y - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.y - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.y - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.y - d - EPS);
mx = max(2*sgn2-1, mx);
}
else
{
int sgn1, sgn2;
sgn1 = signbit(v0.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v0.z - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v1.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v1.z - d - EPS);
mx = max(2*sgn2-1, mx);
sgn1 = signbit(v2.z - d + EPS);
mn = min(2*sgn1-1, mn);
sgn2 = signbit(v2.z - d - EPS);
mx = max(2*sgn2-1, mx);
}
#endif
return -(mn + mx);
}
//------------------------------------------------------------------------
__device__ __forceinline__ void getAABB(const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
tbox.m_mn.x = fminf(fminf(v0.x, v1.x), v2.x);
tbox.m_mn.y = fminf(fminf(v0.y, v1.y), v2.y);
tbox.m_mn.z = fminf(fminf(v0.z, v1.z), v2.z);
tbox.m_mx.x = fmaxf(fmaxf(v0.x, v1.x), v2.x);
tbox.m_mx.y = fmaxf(fmaxf(v0.y, v1.y), v2.y);
tbox.m_mx.z = fmaxf(fmaxf(v0.z, v1.z), v2.z);
}
//------------------------------------------------------------------------
// Computes the box and the centroid of a triangle
__device__ __forceinline__ float3 getCentroid(const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
getAABB(v0, v1, v2, tbox);
return (tbox.m_mn + tbox.m_mx)*0.5f;
}
//------------------------------------------------------------------------
// Computes which side of the plane is the point on based on its centroid
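// Returns -1 when the centroid's signed distance to the plane is below EPS and 1 otherwise; as a side effect tbox receives the triangle's bounding box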
__device__ __forceinline__ int getPlaneCentroidPosition(const float4& plane, const float3& v0, const float3& v1, const float3& v2, CudaAABB& tbox)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int pos;
float3 centroid = getCentroid(v0, v1, v2, tbox);
float ctd = planeDistance(normal, d, centroid);
if(ctd < EPS)
pos = -1;
else
pos = 1;
return pos;
}
//------------------------------------------------------------------------
// Split triangle bounding box based on spatial split location
__device__ __forceinline__ int getPlanePositionClipped(const float4& plane, const float3& v0, const float3& v1, const float3& v2, const CudaAABB& nodeBox)
{
int dim = getPlaneDimension(plane);
float split = plane.w;
CudaAABB triBox, triBoxL, triBoxR;
getAABB(v0, v1, v2, triBox);
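// triBoxL/triBoxR hold the bounds of the triangle parts on the left/right of the split plane; the split dimension is clipped exactly and the two remaining dimensions are recomputed from the edge/plane intersections below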
// Because GPUs do not support register indexing we have to switch execution based on dimension
switch(dim)
{
case 0:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.x = triBox.m_mn.x;
triBoxR.m_mx.x = triBox.m_mx.x;
triBoxL.m_mx.x = triBoxR.m_mn.x = split;
//two remaining dimensions are recomputed
{
//reordering vertices’ indices
const float3* _min = (v1.x <= v0.x) ? &v1 : &v0;
const float3* _max = (v1.x <= v0.x) ? &v0 : &v1;
const float3* vertMin = (v2.x < _min->x) ? &v2 : _min;
const float3* vertMax = (v2.x >= _max->x) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->x;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->x)/(iB->x-iA->x);
const float ratio_cd = (split-iA->x)/(iC->x-iA->x);
const float x0 = iA->y + ratio_ab*(iB->y-iA->y);
const float x1 = iA->y + ratio_cd*(iC->y-iA->y);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.y = fminf(xmin, iA->y);
triBoxL.m_mx.y = fmaxf(xmax, iA->y);
triBoxR.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxR.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}else{
triBoxR.m_mn.y = fminf(xmin, iA->y);
triBoxR.m_mx.y = fmaxf(xmax, iA->y);
triBoxL.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxL.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}
const float y0 = iA->z + ratio_ab*(iB->z-iA->z);
const float y1 = iA->z + ratio_cd*(iC->z-iA->z);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.z = fminf(ymin, iA->z);
triBoxL.m_mx.z = fmaxf(ymax, iA->z);
triBoxR.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxR.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}else{
triBoxR.m_mn.z = fminf(ymin, iA->z);
triBoxR.m_mx.z = fmaxf(ymax, iA->z);
triBoxL.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxL.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}
}
break;
case 1:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.y = triBox.m_mn.y;
triBoxR.m_mx.y = triBox.m_mx.y;
triBoxL.m_mx.y = triBoxR.m_mn.y = split;
//two remaining dimensions are recomputed
{
//reordering vertices’ indices
const float3* _min = (v1.y <= v0.y) ? &v1 : &v0;
const float3* _max = (v1.y <= v0.y) ? &v0 : &v1;
const float3* vertMin = (v2.y < _min->y) ? &v2 : _min;
const float3* vertMax = (v2.y >= _max->y) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->y;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->y)/(iB->y-iA->y);
const float ratio_cd = (split-iA->y)/(iC->y-iA->y);
const float x0 = iA->x + ratio_ab*(iB->x-iA->x);
const float x1 = iA->x + ratio_cd*(iC->x-iA->x);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.x = fminf(xmin, iA->x);
triBoxL.m_mx.x = fmaxf(xmax, iA->x);
triBoxR.m_mn.x = fminf(xmin, fminf(iB->x, iC->x));
triBoxR.m_mx.x = fmaxf(xmax, fmaxf(iB->x, iC->x));
}else{
triBoxR.m_mn.x = fminf(xmin, iA->x);
triBoxR.m_mx.x = fmaxf(xmax, iA->x);
triBoxL.m_mn.x = fminf(xmin, fminf(iB->x, iC->x));
triBoxL.m_mx.x = fmaxf(xmax, fmaxf(iB->x, iC->x));
}
const float y0 = iA->z + ratio_ab*(iB->z-iA->z);
const float y1 = iA->z + ratio_cd*(iC->z-iA->z);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.z = fminf(ymin, iA->z);
triBoxL.m_mx.z = fmaxf(ymax, iA->z);
triBoxR.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxR.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}else{
triBoxR.m_mn.z = fminf(ymin, iA->z);
triBoxR.m_mx.z = fmaxf(ymax, iA->z);
triBoxL.m_mn.z = fminf(ymin, fminf(iB->z, iC->z));
triBoxL.m_mx.z = fmaxf(ymax, fmaxf(iB->z, iC->z));
}
}
break;
case 2:
//initializing tight AABBs only for splitting dimension
triBoxL.m_mn.z = triBox.m_mn.z;
triBoxR.m_mx.z = triBox.m_mx.z;
triBoxL.m_mx.z = triBoxR.m_mn.z = split;
//two remaining dimensions are recomputed
{
//reordering vertices’ indices
const float3* _min = (v1.z <= v0.z) ? &v1 : &v0;
const float3* _max = (v1.z <= v0.z) ? &v0 : &v1;
const float3* vertMin = (v2.z < _min->z) ? &v2 : _min;
const float3* vertMax = (v2.z >= _max->z) ? &v2 : _max;
const float3* vertMid = (&v0 != vertMin && &v0 != vertMax) ? &v0 : ((&v1 != vertMin && &v1 != vertMax) ? &v1 : &v2);
const bool conda = split <= vertMid->z;
const float3* iA = conda ? vertMin : vertMax;
const float3* iB = vertMid;
const float3* iC = conda ? vertMax : vertMin;
const float ratio_ab = (split-iA->z)/(iB->z-iA->z);
const float ratio_cd = (split-iA->z)/(iC->z-iA->z);
const float x0 = iA->y + ratio_ab*(iB->y-iA->y);
const float x1 = iA->y + ratio_cd*(iC->y-iA->y);
const float xmin = fminf(x0, x1);
const float xmax = fmaxf(x0, x1);
if(conda){
triBoxL.m_mn.y = fminf(xmin, iA->y);
triBoxL.m_mx.y = fmaxf(xmax, iA->y);
triBoxR.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxR.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}else{
triBoxR.m_mn.y = fminf(xmin, iA->y);
triBoxR.m_mx.y = fmaxf(xmax, iA->y);
triBoxL.m_mn.y = fminf(xmin, fminf(iB->y, iC->y));
triBoxL.m_mx.y = fmaxf(xmax, fmaxf(iB->y, iC->y));
}
const float y0 = iA->x + ratio_ab*(iB->x-iA->x);
const float y1 = iA->x + ratio_cd*(iC->x-iA->x);
const float ymin = fminf(y0, y1);
const float ymax = fmaxf(y0, y1);
if(conda){
triBoxL.m_mn.x = fminf(ymin, iA->x);
triBoxL.m_mx.x = fmaxf(ymax, iA->x);
triBoxR.m_mn.x = fminf(ymin, fminf(iB->x, iC->x));
triBoxR.m_mx.x = fmaxf(ymax, fmaxf(iB->x, iC->x));
}else{
triBoxR.m_mn.x = fminf(ymin, iA->x);
triBoxR.m_mx.x = fmaxf(ymax, iA->x);
triBoxL.m_mn.x = fminf(ymin, fminf(iB->x, iC->x));
triBoxL.m_mx.x = fmaxf(ymax, fmaxf(iB->x, iC->x));
}
}
break;
}
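// Intersect the clipped triangle boxes with the node box to decide which children receive the reference: -1 = left only, 1 = right only, 0 = both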
float3 intersectMn = fmaxf(triBoxL.m_mn, nodeBox.m_mn);
float3 intersectMx = fminf(triBoxL.m_mx, nodeBox.m_mx);
bool leftIsect = (intersectMn.x <= intersectMx.x) && (intersectMn.y <= intersectMx.y) && (intersectMn.z <= intersectMx.z);
intersectMn = fmaxf(triBoxR.m_mn, nodeBox.m_mn);
intersectMx = fminf(triBoxR.m_mx, nodeBox.m_mx);
bool rightIsect = (intersectMn.x <= intersectMx.x) && (intersectMn.y <= intersectMx.y) && (intersectMn.z <= intersectMx.z);
return -1*leftIsect + 1*rightIsect;
}
inline __host__ __device__ double3 operator+(double3 a, double3 b)
{
return make_double3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ double3 operator-(double3 a, double3 b)
{
return make_double3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __host__ __device__ double3 operator*(double3 a, float b)
{
return make_double3(a.x * b, a.y * b, a.z * b);
}
//------------------------------------------------------------------------
__device__ __forceinline__ void boxCenterHalfSize(const CudaAABB& nodeBox, float3& center, float3& halfSize)
{
center = (nodeBox.m_mn + nodeBox.m_mx)*0.5f;
halfSize = (nodeBox.m_mx - nodeBox.m_mn)*0.5f/* + 2000*EPS*/;
/*double3 cD, hD;
cD = (make_double3(nodeBox.m_mn.x, nodeBox.m_mn.y, nodeBox.m_mn.z) + make_double3(nodeBox.m_mx.x, nodeBox.m_mx.y, nodeBox.m_mx.z))*0.5;
hD = (make_double3(nodeBox.m_mx.x, nodeBox.m_mx.y, nodeBox.m_mx.z) - make_double3(nodeBox.m_mn.x, nodeBox.m_mn.y, nodeBox.m_mn.z))*0.5;
center = make_float3(cD.x, cD.y, cD.z);
halfSize = make_float3(hD.x, hD.y, hD.z);*/
}
//------------------------------------------------------------------------
// Compute triangle's position wrt splitting plane by computing its intersection with children bounding boxes
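// Returns -1 when the triangle overlaps only the left child box, 1 when only the right one, 0 when it overlaps both; if numerical imprecision makes it miss both boxes, -1 is returned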
__device__ __forceinline__ int getTriChildOverlap(const float4& plane, const float3& v0, const float3& v1, const float3& v2, const CudaAABB& nodeBox)
{
int dim = getPlaneDimension(plane);
float split = plane.w;
CudaAABB nodeBoxL, nodeBoxR;
nodeBoxL = nodeBoxR = nodeBox;
// Because GPUs do not support register indexing we have to switch execution based on dimension
switch(dim)
{
case 0:
nodeBoxL.m_mx.x = nodeBoxR.m_mn.x = split;
break;
case 1:
nodeBoxL.m_mx.y = nodeBoxR.m_mn.y = split;
break;
case 2:
nodeBoxL.m_mx.z = nodeBoxR.m_mn.z = split;
break;
}
float3 boxCenterL, boxHalfSizeL;
boxCenterHalfSize(nodeBoxL, boxCenterL, boxHalfSizeL);
int leftIsect = triBoxOverlap(boxCenterL, boxHalfSizeL, v0, v1, v2, nodeBoxL.m_mn, nodeBoxL.m_mx);
float3 boxCenterR, boxHalfSizeR;
boxCenterHalfSize(nodeBoxR, boxCenterR, boxHalfSizeR);
int rightIsect = triBoxOverlap(boxCenterR, boxHalfSizeR, v0, v1, v2, nodeBoxR.m_mn, nodeBoxR.m_mx);
if(leftIsect == 0 && rightIsect == 0) // Should not happen, but happens due to numerical imprecision
{
//printf("Cannot happen!\n");
return -1;
}
return -1*leftIsect + 1*rightIsect;
}
//------------------------------------------------------------------------
// Computes which side of the plane is a ray on
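// Returns a nonzero value when the ray segment [tmin,tmax] lies entirely on one side of the plane and 0 when it crosses it; orderCounter is set to -1/1 for a front-to-back/back-to-front crossing so the traversal order of the children can be decided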
__device__ __forceinline__ int getPlanePosition(const float4& plane, const float3& orig, const float3& dir, const float& tmin, const float& tmax, int& orderCounter)
{
// Fetch plane
float3 normal;
normal.x = plane.x;
normal.y = plane.y;
normal.z = plane.z;
float d = plane.w;
int retVal;
#if 0
int min = 0;
int max = 0;
float d1 = planeDistance(normal, d, orig + tmin*dir);
float d2 = planeDistance(normal, d, orig + tmax*dir);
// OPTIMIZE: Get rid of conditionals?
if (d1 < EPS)
min = -1;
if (d1 > -EPS)
max = 1;
if (d2 < EPS)
min = -1;
if (d2 > -EPS)
max = 1;
retVal = min + max;
#else
float dv = dot(dir, normal);
orderCounter = 0;
#define COPLANAR_EPS 1e-30f
if(dv < -COPLANAR_EPS)
{
// the ray will hit from the front side
float t = -planeDistance(normal, d, orig) / dv;
if (t > tmax + EPS)
retVal = 1;
else if (t < tmin - EPS)
retVal = -1;
else
{
// hits the plane from front to back
orderCounter = -1;
retVal = 0;
}
}
else if(dv > COPLANAR_EPS)
{
// the ray will hit from the back side
float t = -planeDistance(normal, d, orig) / dv;
if (t > tmax + EPS)
retVal = -1;
else if (t < tmin - EPS)
retVal = 1;
else
{
// hits the plane from back to front
orderCounter = 1;
retVal = 0;
}
}
else
{
int min = 0;
int max = 0;
float d1 = planeDistance(normal, d, orig + tmin*dir);
float d2 = planeDistance(normal, d, orig + tmax*dir);
// OPTIMIZE: Get rid of conditionals?
if (d1 < EPS)
min = -1;
if (d1 > -EPS)
max = 1;
if (d2 < EPS)
min = -1;
if (d2 > -EPS)
max = 1;
retVal = min + max;
}
#endif
return retVal;
}
//------------------------------------------------------------------------
// Computes the number of samples for the cost function
__device__ __host__ __forceinline__ int getNumberOfSamples(const int& number)
{
return (int)sqrtf(number);
}
//------------------------------------------------------------------------
// Computes area of the bounding box
__device__ __forceinline__ float areaAABB(const volatile CudaAABB& bbox)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
return (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by x
__device__ __forceinline__ void areaAABBX(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = pos - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.x = bbox.m_mx.x - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by y
__device__ __forceinline__ void areaAABBY(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = pos - bbox.m_mn.y;
d.z = bbox.m_mx.z - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.y = bbox.m_mx.y - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Computes areas of left and right parts of bounding box divided by z
__device__ __forceinline__ void areaAABBZ(const volatile CudaAABB& bbox, float pos, float& areaLeft, float& areaRight)
{
float3 d;
d.x = bbox.m_mx.x - bbox.m_mn.x;
d.y = bbox.m_mx.y - bbox.m_mn.y;
d.z = pos - bbox.m_mn.z;
areaLeft = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
d.z = bbox.m_mx.z - pos;
areaRight = (d.x*d.y + d.y*d.z + d.z*d.x)*2.0f;
}
//------------------------------------------------------------------------
// Choose axis based on Havran's longest-axis + round-robin mixture
__device__ __forceinline__ int taskAxis(volatile float4& plane, const volatile CudaAABB& bbox, volatile int &sharedInt, int axis)
{
volatile float* tPln = ((volatile float*)&plane)+threadIdx.x;
volatile float* tMin = ((volatile float*)&bbox.m_mn)+threadIdx.x;
volatile float* tMax = ((volatile float*)&bbox.m_mx)+threadIdx.x;
// Compute longest axis
if(threadIdx.x < 3)
{
*tPln = *tMax - *tMin;
float dMax = max3(plane.x, plane.y, plane.z);
if(__ffs(__ballot(dMax == *tPln)) == threadIdx.x+1) // First thread with such condition
{
sharedInt = threadIdx.x; // Longest axis
}
}
int warpIdx = blockDim.y*blockIdx.x + threadIdx.y; // Warp ID
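// Warps whose index is a multiple of four use the longest axis; all other warps keep the round-robin axis passed in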
return ((warpIdx & 0x3) != 0) ? axis : sharedInt;
}
//------------------------------------------------------------------------
// Splits the node with bounding box's spatial median along the longest axis
__device__ void splitMedian(int tid, int axis, volatile float4& plane, const volatile CudaAABB& bbox)
{
ASSERT_DIVERGENCE("splitMedian", tid);
volatile float* tPln = ((volatile float*)&plane)+tid;
volatile float* tMin = ((volatile float*)&bbox.m_mn)+tid;
volatile float* tMax = ((volatile float*)&bbox.m_mx)+tid;
#if 0 // Longest axis
// Compute spatial median
if(tid < 3)
{
#if 1
*tPln = *tMax - *tMin;
float dMax = max3(plane.x, plane.y, plane.z);
if(__ffs(__ballot(dMax == *tPln)) == tid+1) // First thread with such condition
{
plane.w = -(*tMin + *tMax) / 2.0f;
*tPln = 1;
}
else
{
*tPln = 0;
}
#else
if(tid == 0) // Single thread median split
{
if(dMax == plane[threadIdx.y].x)
{
plane[threadIdx.y].x = 1;
plane[threadIdx.y].w = -(bbox.m_mn.x + bbox.m_mx.x) / 2.0f;
}
else
plane[threadIdx.y].x = 0;
if(dMax == plane[threadIdx.y].y)
{
plane[threadIdx.y].y = 1;
plane[threadIdx.y].w = -(bbox.m_mn.y + bbox.m_mx.y) / 2.0f;
}
else
plane[threadIdx.y].y = 0;
if(dMax == plane[threadIdx.y].z)
{
plane[threadIdx.y].z = 1;
plane[threadIdx.y].w = -(bbox.m_mn.z + bbox.m_mx.z) / 2.0f;
}
else
plane[threadIdx.y].z = 0;
}
#endif
}
#else // Round robin
//int axis = depth % 3;
if(tid < 3)
{
*tPln = *tMax - *tMin;
if(tid == axis)
{
plane.w = -(*tMin + *tMax) / 2.0f;
*tPln = 1;
}
else
{
*tPln = 0;
}
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on AABB
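// planePos in [0, numAxisAlignedPlanes) is mapped to evenly spaced candidate positions inside the box, with roughly one third of the planes per axis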
__device__ void findPlaneAABB(int planePos, const volatile CudaAABB& bbox, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneAABB", threadIdx.x);
#if 1 // Equal number of planes in each dimension
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
float rpos = (float)(1 + (planePos % planesPerAxis))/(float)(planesPerAxis+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
#else
float lX = bbox.m_mx.x - bbox.m_mn.x;
float lY = bbox.m_mx.y - bbox.m_mn.y;
float lZ = bbox.m_mx.z - bbox.m_mn.z;
float sumLengths = lX + lY + lZ;
// Assign the planes to different methods
int numX = lX/sumLengths*PLANE_COUNT+0.5f;
int numY = lY/sumLengths*PLANE_COUNT+0.5f;
int numZ = lZ/sumLengths*PLANE_COUNT+0.5f;
//int axis = (planePos < numX) ? 0 : (planePos < numX+numY) ? 1 : 2;
int axis = (planePos >= numX) + (planePos >= numX+numY);
if(axis == 0)
{
float rpos = (float)(planePos+1) / (float)(numX+1);
float pos = bbox.m_mn.x + lX * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float rpos = (float)(planePos-numX+1) / (float)(numY+1);
float pos = bbox.m_mn.y + lY * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float rpos = (float)(planePos-numX-numY+1) / (float)(numZ+1);
float pos = bbox.m_mn.z + lZ * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlaneAABB(int planePos, const volatile CudaAABB& bbox, float& areaLeft, float& areaRight, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneAABB", threadIdx.x);
#if 1 // Equal number of planes in each dimension
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
float rpos = (float)( 1 + (planePos % planesPerAxis))/(float)(planesPerAxis+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
#else
float lX = bbox.m_mx.x - bbox.m_mn.x;
float lY = bbox.m_mx.y - bbox.m_mn.y;
float lZ = bbox.m_mx.z - bbox.m_mn.z;
float sumLengths = lX + lY + lZ;
// Assign the planes to different methods
int numX = lX/sumLengths*PLANE_COUNT+0.5f;
int numY = lY/sumLengths*PLANE_COUNT+0.5f;
int numZ = lZ/sumLengths*PLANE_COUNT+0.5f;
//int axis = (planePos < numX) ? 0 : (planePos < numX+numY) ? 1 : 2;
int axis = (planePos >= numX) + (planePos >= numX+numY);
if(axis == 0)
{
float rpos = (float)(planePos+1) / (float)(numX+1);
float pos = bbox.m_mn.x + lX * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float rpos = (float)(planePos-numX+1) / (float)(numY+1);
float pos = bbox.m_mn.y + lY * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float rpos = (float)(planePos-numX-numY+1) / (float)(numZ+1);
float pos = bbox.m_mn.z + lZ * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
#endif
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlaneTriAABB(int planePos, float4* tris, int* trisIndex, int triStart, const volatile CudaAABB& bbox, float& areaLeft, float& areaRight, float4& plane, int numAxisAlignedPlanes)
{
//ASSERT_DIVERGENCE("findPlaneTriAABB", threadIdx.x);
int tri = planePos / 6;
int axis = (planePos % 6) / 2;
int lim = (planePos % 6) - axis*2; // 0 = lower bound, 1 = upper bound of the triangle's box
int triidx = trisIndex[triStart + tri]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri((CUdeviceptr)tris, triidx, v0, v1, v2);
// Get bounding box
CudaAABB tbox;
getAABB(v0, v1, v2, tbox);
if(axis == 0)
{
float pos;
if(lim == 0)
pos = tbox.m_mn.x;
else
pos = tbox.m_mx.x;
plane = make_float4(-1.f, 0.f, 0.f, pos);
areaAABBX(bbox, pos, areaLeft, areaRight);
}
else if(axis == 1)
{
float pos;
if(lim == 0)
pos = tbox.m_mn.y;
else
pos = tbox.m_mx.y;
plane = make_float4(0.f, -1.f, 0.f, pos);
areaAABBY(bbox, pos, areaLeft, areaRight);
}
else
{
float pos;
if(lim == 0)
pos = tbox.m_mn.z;
else
pos = tbox.m_mx.z;
plane = make_float4(0.f, 0.f, -1.f, pos);
areaAABBZ(bbox, pos, areaLeft, areaRight);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on triangle division
__device__ void findPlaneTriAA(int planePos, CUdeviceptr tris, CUdeviceptr trisIndex, int triStart, int triEnd, float4& plane, int numAxisAlignedPlanes)
{
int planesPerAxis = ((numAxisAlignedPlanes+2) / 3);
int axis = planePos / planesPerAxis;
int triNum = triEnd - triStart;
/*unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int triidx = ((int*)trisIndex)[triStart + (hashC % triNum)]*3;*/
float tpos = (float)(planePos % planesPerAxis)/(float)(planesPerAxis-1);
int triidx = ((int*)trisIndex)[triStart + (int)(tpos * (triNum-1))]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri(tris, triidx, v0, v1, v2);
// Compute triangle centroid
CudaAABB tbox;
float3 cent = getCentroid(v0, v1, v2, tbox);
// Compute axis aligned plane through its centroid
if(axis == 0)
{
plane = make_float4(-1.f, 0.f, 0.f, cent.x);
}
else if(axis == 1)
{
plane = make_float4(0.f, -1.f, 0.f, cent.y);
}
else
{
plane = make_float4(0.f, 0.f, -1.f, cent.z);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on triangle division
__device__ void findPlaneTri(int planePos, CUdeviceptr tris, CUdeviceptr trisIndex, int triStart, int triEnd, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneTri", threadIdx.x);
int triNum = triEnd - triStart;
unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int triidx = ((int*)trisIndex)[triStart + (hashC % triNum)]*3;
// Fetch triangle
float3 v0, v1, v2;
taskFetchTri(tris, triidx, v0, v1, v2);
plane = set3PointPlane(v0, v1, v2);
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread based on ray division
__device__ void findPlaneRay(int planePos, CUdeviceptr rays, CUdeviceptr raysIndex, int rayStart, int rayEnd, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneRay", threadIdx.x);
// BUG: Fails because of unclipped rays
// Good strategy - only for primary rays
// partitioning using an edge of random triangle and camera origin
// RAY1 min / RAY1 max / RAY2 min
int rayNum = rayEnd - rayStart;
unsigned int hashA = planePos;
unsigned int hashB = 0x9e3779b9u;
unsigned int hashC = 0x9e3779b9u;
jenkinsMix(hashA, hashB, hashC);
jenkinsMix(hashA, hashB, hashC);
int raypos1 = rayStart + (hashC % rayNum);
int rayidx1 = ((int*)raysIndex)[raypos1];
float3 orig, dir;
float tmin, tmax;
taskFetchRay(rays, rayidx1, orig, dir, tmin, tmax);
float3 v0 = orig + tmin*dir;
float3 v1 = orig + tmax*dir;
int raypos2 = raypos1+1;
if(raypos2 >= rayEnd)
raypos2 = rayStart;
int rayidx2 = ((int*)raysIndex)[raypos2];
taskFetchRay(rays, rayidx2, orig, dir, tmin, tmax);
float3 v2 = orig + tmax*dir;
if(hashA & 0x1)
v2 = v1 + cross(v1-v0, v2-v1);
plane = set3PointPlane(v0, v1, v2);
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread
__device__ void findPlane(int planePos, CUdeviceptr rays, CUdeviceptr raysIndex, int rayStart, int rayEnd, CUdeviceptr tris, CUdeviceptr trisIndex, int triStart, int triEnd, const volatile CudaAABB& bbox, int numAxisAlignedPlanes, int numTriangleBasedPlanes, float4& plane)
{
ASSERT_DIVERGENCE("findPlane", threadIdx.x);
if(planePos < numAxisAlignedPlanes) // Choose axis aligned plane
{
findPlaneAABB(planePos, bbox, plane, numAxisAlignedPlanes);
}
else if(planePos < numAxisAlignedPlanes + numTriangleBasedPlanes) // Choose triangle based plane
{
findPlaneTri(planePos, tris, trisIndex, triStart, triEnd, plane);
}
else // Choose ray based plane
{
findPlaneRay(planePos, rays, raysIndex, rayStart, rayEnd, plane);
}
}
//------------------------------------------------------------------------
// Compute a splitting plane for each thread in the chosen axis
__device__ void findPlaneRobin(int planePos, const volatile CudaAABB& bbox, int axis, float4& plane)
{
ASSERT_DIVERGENCE("findPlaneRobin", threadIdx.x);
float rpos = (float)(planePos+1) / (float)(WARP_SIZE+1);
if(axis == 0)
{
float pos = bbox.m_mn.x + (bbox.m_mx.x - bbox.m_mn.x) * rpos;
plane = make_float4(-1.f, 0.f, 0.f, pos);
}
else if(axis == 1)
{
float pos = bbox.m_mn.y + (bbox.m_mx.y - bbox.m_mn.y) * rpos;
plane = make_float4(0.f, -1.f, 0.f, pos);
}
else
{
float pos = bbox.m_mn.z + (bbox.m_mx.z - bbox.m_mn.z) * rpos;
plane = make_float4(0.f, 0.f, -1.f, pos);
}
}
//------------------------------------------------------------------------
// Computes Woop triangle from a regular one
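// The Woop representation stores the rows of the inverse of M = [v0-v2, v1-v2, (v0-v2) x (v1-v2)] plus offsets against v2, mapping the triangle onto the canonical unit triangle for a fast intersection test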
__device__ void calcWoop(float3& v0, float3& v1, float3& v2, float4& o0, float4& o1, float4& o2)
{
// Compute woop
float3 c0 = v0 - v2;
float3 c1 = v1 - v2;
float3 c2 = cross(c0,c1);
// division by 0 ???
float det = 1.0/(c0.x*(c2.z*c1.y-c1.z*c2.y) - c0.y*(c2.z*c1.x-c1.z*c2.x) + c0.z*(c2.y*c1.x-c1.y*c2.x));
float3 i0,i1,i2;
//i0 =
i0.x = (c2.z*c1.y-c1.z*c2.y)*det;
i0.y = -(c2.z*c1.x-c1.z*c2.x)*det;
i0.z = (c2.y*c1.x-c1.y*c2.x)*det;
//i1 =
i1.x = -(c2.z*c0.y-c0.z*c2.y)*det;
i1.y = (c2.z*c0.x-c0.z*c2.x)*det;
i1.z = -(c2.y*c0.x-c0.y*c2.x)*det;
//i2 =
i2.x = (c1.z*c0.y-c0.z*c1.y)*det;
i2.y = -(c1.z*c0.x-c0.z*c1.x)*det;
i2.z = (c1.y*c0.x-c0.y*c1.x)*det;
// Final values
o0.x = i2.x;
o0.y = i2.y;
o0.z = i2.z;
o0.w = -dot(-i2,v2);
o1.x = i0.x;
o1.y = i0.y;
o1.z = i0.z;
o1.w = dot(-i0,v2);
o2.x = i1.x;
o2.y = i1.y;
o2.z = i1.z;
o2.w = dot(-i1,v2);
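// Normalize a negative zero in o0.x to +0; the 0x80000000 (-0.0f) bit pattern is reserved as the leaf terminator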
if (o0.x == 0.0f)
o0.x = 0.0f;
}
//------------------------------------------------------------------------
// Creates a node in the compact layout
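// Triangles are written inline and terminated by a sentinel vertex with the 0x80000000 bit pattern; the return value is the bitwise complement of the output offset, marking the child pointer as a leaf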
__device__ int createLeaf(int tid, int outOfs, float* outTriMem, int* outIdxMem, int start, int end, float* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
float4 triData;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = ((float4*)outTriMem) + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris*3+1); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int pos = i*WARP_SIZE + tid;
int tri = pos/3;
int item = pos % 3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
/*float4* inTri = ((float4*)inTriMem) + idxData*3; // Memory for the first triangle data
//triData.x = inTri[item].x;
//triData.y = inTri[item].y;
//triData.z = inTri[item].z;
//triData.w = inTri[item].w;
triData = inTri[item];*/
triData = tex1Dfetch(t_trisA, idxData*3 + item);
}
else // Sentinel
{
idxData = 0;
triData = make_float4(__int_as_float(0x80000000));
}
// Write out the data
if(tri < numTris || (tri == numTris && item == 0))
{
outTri[pos] = triData;
outIdx[pos] = idxData;
}
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf in the compact layout, with Woop triangles
__device__ int createLeafWoop(int tid, int outOfs, float4* outTriMem, int* outIdxMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = outTriMem + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((CUdeviceptr)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[pos] = idxData;
}
}
if(tid == 0)
{
outTri[numTris*3].x = __int_as_float(0x80000000);
outIdx[numTris*3] = 0;
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf in the compact layout, with references to triangles
__device__ int createLeafReference(int tid, int outOfs, int* outIdxMem, int start, int end, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
outIdx[tri] = idxData;
}
}
if(tid == 0)
{
outIdx[numTris] = 0x80000000;
}
return ~outOfs;
}
//------------------------------------------------------------------------
// Creates a leaf for a Kdtree, with Woop triangles
__device__ int createKdtreeLeafWoop(int tid, int outOfs, float4* outTriMem, int* outIdxMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = outTriMem + outOfs; // Memory for the first triangle data
int* outIdx = outIdxMem + outOfs; // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((CUdeviceptr)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[pos] = idxData;
}
}
return numTris | KDTREE_LEAF;
}
//------------------------------------------------------------------------
// Creates a leaf for a Kdtree, with Woop triangles
__device__ int createKdtreeInterleavedLeafWoop(int tid, int outOfs, char* outTriMem, int start, int end, float4* inTriMem, int* inIdxMem)
{
// Compute output data pointers
int numTris = end-start;
int idxData;
int* inIdx = inIdxMem + start; // Memory for the first triangle index
float4* outTri = (float4*)(outTriMem + outOfs); // Memory for the first triangle data
int* outIdx = (int*)(outTriMem + outOfs + numTris*3*sizeof(float4)); // Memory for the first triangle index
// Write out all triangles and the triangle sentinel per vertex
int numIters = taskWarpSubtasksZero(numTris); // Number of written out data chunks divided by WARP_SIZE
for(int i = 0; i < numIters; i++)
{
int tri = i*WARP_SIZE + tid;
int pos = tri*3;
if(tri < numTris) // Regular triangle
{
idxData = inIdx[tri];
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri((CUdeviceptr)inTriMem, idxData*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
outTri[pos+0] = o0;
outTri[pos+1] = o1;
outTri[pos+2] = o2;
outIdx[tri] = idxData;
}
}
return numTris | KDTREE_LEAF;
}
//------------------------------------------------------------------------
// Kernel converting regular triangles to Woop triangles
extern "C" __global__ void createWoop(CUdeviceptr tri, CUdeviceptr woop, int numTris)
{
// Compute output data pointers
int idx = blockDim.x * blockIdx.x + threadIdx.x; // 1D index
if(idx < numTris)
{
float3 v0, v1, v2;
float4 o0, o1, o2;
taskFetchTri(tri, idx*3, v0, v1, v2);
calcWoop(v0, v1, v2, o0, o1, o2);
float4* woopData = (float4*)woop;
woopData[idx*3+0] = o0;
woopData[idx*3+1] = o1;
woopData[idx*3+2] = o2;
}
}
//------------------------------------------------------------------------
// Returns true if the node is a leaf
__device__ bool isKdLeaf(int flag)
{
#if defined(COMPACT_LAYOUT) && defined(WOOP_TRIANGLES)
return flag < 0;
#else
return flag & KDTREE_LEAF;
#endif
}
//------------------------------------------------------------------------ |
a49ee02685bf236d4319046d6274275727b4f602.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=0;
float Value2=0;
float Value3=0;
float Value=0;
float I1=A[i];
float I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(float k=0; k<(float)iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
| a49ee02685bf236d4319046d6274275727b4f602.cu | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=0;
float Value2=0;
float Value3=0;
float Value=0;
float I1=A[i];
float I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(float k=0; k<(float)iterations;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
61ee853a710bbcacc9886992d0c65ab065fae6fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "mesh.h"
namespace acr
{
Mesh::Mesh(const aiMesh *aiMesh)
: materialIndex(aiMesh->mMaterialIndex)
{
thrust::host_vector<Vertex> vs(aiMesh->mNumVertices);
//To get centroid
math::vec3 sumVertices(0, 0, 0);
math::vec3 minBound(FLT_MAX, FLT_MAX, FLT_MAX);
math::vec3 maxBound(-FLT_MAX, -FLT_MAX, -FLT_MAX);
for (uint32_t i = 0; i < aiMesh->mNumVertices; i++)
{
for (uint32_t j = 0; j < 3; j++)
{
vs[i].position[j] = aiMesh->mVertices[i][j];
vs[i].normal[j] = aiMesh->mNormals[i][j];
vs[i].color[j] = aiMesh->mColors[0] ? aiMesh->mColors[0][i][j] : 1.0f;
}
sumVertices += vs[i].position;
minBound = math::min(minBound, vs[i].position);
maxBound = math::max(maxBound, vs[i].position);
}
boundingBox.min = minBound;
boundingBox.max = maxBound;
//Average to get centroid
centroid = sumVertices / (float)aiMesh->mNumVertices;
vertices = vector<Vertex>(vs);
thrust::host_vector<Face> f(aiMesh->mNumFaces);
for (uint32_t i = 0; i < aiMesh->mNumFaces; i++)
{
for (uint32_t j = 0; j < 3; j++)
{
f[i].indices[j] = aiMesh->mFaces[i].mIndices[j];
}
}
faces = BIH<Face>(f, boundingBox, &vs[0]);
}
Mesh::~Mesh() {}
bool Mesh::intersect(const Ray &r, HitInfo &info)
{
Path p;
if (boundingBox.intersect(r, info) && faces.intersect(r, info, &vertices[0], p))
{
info.materialIndex = materialIndex;
return true;
}
return false;
}
} // namespace acr
| 61ee853a710bbcacc9886992d0c65ab065fae6fd.cu | #include <cstdlib>
#include <cuda.h>
#include "mesh.h"
namespace acr
{
Mesh::Mesh(const aiMesh *aiMesh)
: materialIndex(aiMesh->mMaterialIndex)
{
thrust::host_vector<Vertex> vs(aiMesh->mNumVertices);
//To get centroid
math::vec3 sumVertices(0, 0, 0);
math::vec3 minBound(FLT_MAX, FLT_MAX, FLT_MAX);
math::vec3 maxBound(-FLT_MAX, -FLT_MAX, -FLT_MAX);
for (uint32_t i = 0; i < aiMesh->mNumVertices; i++)
{
for (uint32_t j = 0; j < 3; j++)
{
vs[i].position[j] = aiMesh->mVertices[i][j];
vs[i].normal[j] = aiMesh->mNormals[i][j];
vs[i].color[j] = aiMesh->mColors[0] ? aiMesh->mColors[0][i][j] : 1.0f;
}
sumVertices += vs[i].position;
minBound = math::min(minBound, vs[i].position);
maxBound = math::max(maxBound, vs[i].position);
}
boundingBox.min = minBound;
boundingBox.max = maxBound;
//Average to get centroid
centroid = sumVertices / (float)aiMesh->mNumVertices;
vertices = vector<Vertex>(vs);
thrust::host_vector<Face> f(aiMesh->mNumFaces);
for (uint32_t i = 0; i < aiMesh->mNumFaces; i++)
{
for (uint32_t j = 0; j < 3; j++)
{
f[i].indices[j] = aiMesh->mFaces[i].mIndices[j];
}
}
faces = BIH<Face>(f, boundingBox, &vs[0]);
}
Mesh::~Mesh() {}
bool Mesh::intersect(const Ray &r, HitInfo &info)
{
Path p;
if (boundingBox.intersect(r, info) && faces.intersect(r, info, &vertices[0], p))
{
info.materialIndex = materialIndex;
return true;
}
return false;
}
} // namespace acr
|
8e16f9e622e31ff8becae24c0bcf250b0d212c91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* All threads increment a counter in global memory
* by one. The difference is that one uses CUDA's atomic function
* to perform an increment.
* What to observe/ponder:
* - What are the values that are printed out?
* - Are they consistent across runs?
*/
#include <stdio.h>
__device__ __managed__ int counter;
void check_cuda_errors()
{
hipError_t rc;
rc = hipGetLastError();
if (rc != hipSuccess)
{
printf("Last CUDA error %s\n", hipGetErrorString(rc));
}
}
__global__ void non_atomic()
{
counter++;
}
__global__ void atomic()
{
atomicAdd(&counter, 1);
}
int main(int argc, char **argv)
{
// Set up counter
counter = 0;
dim3 gridDim(128, 128);
dim3 blockDim(32, 32);
hipLaunchKernelGGL(( non_atomic), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
check_cuda_errors();
printf("Result from non-atomic increment by 16777216 threads: %d\n", counter);
// Reset counter
counter = 0;
hipLaunchKernelGGL(( atomic), dim3(gridDim), dim3(blockDim), 0, 0, );
hipDeviceSynchronize();
check_cuda_errors();
printf("Result from atomic increment by 16777216 threads: %d\n", counter);
return 0;
} | 8e16f9e622e31ff8becae24c0bcf250b0d212c91.cu | /**
* All threads increment a counter in global memory
* by one. The difference is that one uses CUDA's atomic function
* to perform an increment.
* What to observe/ponder:
* - What are the values that are printed out?
* - Are they consistent across runs?
*/
#include <stdio.h>
__device__ __managed__ int counter;
void check_cuda_errors()
{
cudaError_t rc;
rc = cudaGetLastError();
if (rc != cudaSuccess)
{
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
}
}
__global__ void non_atomic()
{
counter++;
}
__global__ void atomic()
{
atomicAdd(&counter, 1);
}
int main(int argc, char **argv)
{
// Set up counter
counter = 0;
dim3 gridDim(128, 128);
dim3 blockDim(32, 32);
non_atomic<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
check_cuda_errors();
printf("Result from non-atomic increment by 16777216 threads: %d\n", counter);
// Reset counter
counter = 0;
atomic<<<gridDim, blockDim>>>();
cudaDeviceSynchronize();
check_cuda_errors();
printf("Result from atomic increment by 16777216 threads: %d\n", counter);
return 0;
} |
1ad3e4f84d6df8ed7fa5f1f720b7a5f71d240e6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* saxpy.cu
*
* Part of the microdemo to illustrate how to initialize the driver API.
* Compile this into ptx with:
*
* Build with: nvcc --ptx saxpy.cu
*
* The resulting .ptx file is needed by the sample saxpyDrv.cpp.
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
// saxpy global function adds in[i]*alpha to each element out[i]
extern "C" __global__ void
saxpy( float *out, const float *in, size_t N, float alpha )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += blockDim.x*gridDim.x ) {
out[i] += in[i]*alpha;
}
}
| 1ad3e4f84d6df8ed7fa5f1f720b7a5f71d240e6f.cu | /*
*
* saxpy.cu
*
* Part of the microdemo to illustrate how to initialize the driver API.
* Compile this into ptx with:
*
* Build with: nvcc --ptx saxpy.cu
*
* The resulting .ptx file is needed by the sample saxpyDrv.cpp.
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
// saxpy global function adds in[i]*alpha to each element out[i]
extern "C" __global__ void
saxpy( float *out, const float *in, size_t N, float alpha )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += blockDim.x*gridDim.x ) {
out[i] += in[i]*alpha;
}
}
|
66f132cb3088a2a672363231389cf81387910bac.hip | // !!! This is a file automatically generated by hipify!!!
//#include "../../include/layers/inner_product_layer.h"
//
//#include "../../include/util/math_function_ptr.h"
//
//namespace BigBang {
//
//template<typename dtype>
//void InnerProductLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
// bigbang_gpu_gemm<dtype>(false, false, bottom_row_, weights_column_, bottom_column_, 1.,
// bottom->gpu_data(), weights_->gpu_data(), 0., top->mutable_gpu_data());
//}
//
//template<typename dtype>
//void InnerProductLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) {
// const dtype* bottom_data = bottom->gpu_data();
// const dtype* top_diff_data = top->gpu_diff_data();
// //get the delta
//	bigbang_gpu_gemm<dtype>(false, true, top_row_, weights_column_, top_column_, 1., top_diff_data,
// weights_->gpu_data(), 0, bottom->mutable_gpu_diff_data());
// UpdateParams_GPU(bottom_data, top_diff_data);
//}
//
//template<typename dtype>
//void InnerProductLayer<dtype>::UpdateParams_GPU(const dtype* bottom_data, const dtype* delta) {
// //update the biases
// if (use_biases_) {
// dtype* biases_mutable_diff_data = biases_->mutable_gpu_data();
// bigbang_gpu_column_sum_plus(delta, bottom_row_, biases_row_, biases_mutable_diff_data);
// bigbang_gpu_minus(biases_->gpu_data(), biases_mutable_diff_data, biases_row_, alpha_ / bottom_row_,
// biases_->mutable_gpu_data());
// }
//
// //update the weights
// dtype* weights_diff_data = weights_->mutable_gpu_diff_data();
// hipMemset(weights_diff_data, 0, sizeof(dtype)*weights_row_*weights_column_);
// /*bigbang_cpu_gemm(bottom_data, bottom_row_, bottom_column_, true, delta, top_row_, top_column_,
// false, alpha_ / bottom_row_, (dtype*)nullptr, 0, 0, false, weights_diff_data);*/
// bigbang_gpu_gemm<dtype>(true, false, bottom_row_, top_column_, bottom_column_, alpha_ / bottom_row_,
// bottom_data, delta, 0, weights_diff_data);
// bigbang_gpu_minus(weights_->gpu_data(), weights_diff_data, weights_row_*weights_column_,
// static_cast<dtype>(1.0), weights_->mutable_gpu_data());
//}
//
//
//
//} | 66f132cb3088a2a672363231389cf81387910bac.cu | //#include "../../include/layers/inner_product_layer.h"
//
//#include "../../include/util/math_function_ptr.h"
//
//namespace BigBang {
//
//template<typename dtype>
//void InnerProductLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
// bigbang_gpu_gemm<dtype>(false, false, bottom_row_, weights_column_, bottom_column_, 1.,
// bottom->gpu_data(), weights_->gpu_data(), 0., top->mutable_gpu_data());
//}
//
//template<typename dtype>
//void InnerProductLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) {
// const dtype* bottom_data = bottom->gpu_data();
// const dtype* top_diff_data = top->gpu_diff_data();
// //get the delta
// bigbang_gpu_gemm<dtype>(false, true, top_row_, weights_column, top_column_, 1., top_diff_data,
// weights_->gpu_data(), 0, bottom->mutable_gpu_diff_data());
// UpdateParams_GPU(bottom_data, top_diff_data);
//}
//
//template<typename dtype>
//void InnerProductLayer<dtype>::UpdateParams_GPU(const dtype* bottom_data, const dtype* delta) {
// //update the biases
// if (use_biases_) {
// dtype* biases_mutable_diff_data = biases_->mutable_gpu_data();
// bigbang_gpu_column_sum_plus(delta, bottom_row_, biases_row_, biases_mutable_diff_data);
// bigbang_gpu_minus(biases_->gpu_data(), biases_mutable_diff_data, biases_row_, alpha_ / bottom_row_,
// biases_->mutable_gpu_data());
// }
//
// //update the weights
// dtype* weights_diff_data = weights_->mutable_gpu_diff_data();
// cudaMemset(weights_diff_data, 0, sizeof(dtype)*weights_row_*weights_column_);
// /*bigbang_cpu_gemm(bottom_data, bottom_row_, bottom_column_, true, delta, top_row_, top_column_,
// false, alpha_ / bottom_row_, (dtype*)nullptr, 0, 0, false, weights_diff_data);*/
// bigbang_gpu_gemm<dtype>(true, false, bottom_row_, top_column_, bottom_column_, alpha_ / bottom_row_,
// bottom_data, delta, 0, weights_diff_data);
// bigbang_gpu_minus(weights_->gpu_data(), weights_diff_data, weights_row_*weights_column_,
// static_cast<dtype>(1.0), weights_->mutable_gpu_data());
//}
//
//
//
//} |
73653a4879056ee18ae6497e810149c4f908f5f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <numeric>
//#define ITER_WORKLOAD
#include "../../comm/coo.cuh"
#include "../../comm/csr.cuh"
#include "../../util/time.cuh"
#include "baseline.cuh"
#include "gc.cuh"
#include "validation.cuh"
#define FETCH_SIZE (FETCHSIZE)
using namespace std;
int main(int argc, char *argv[])
{
char *input_file = NULL;
bool start_from_0 = false;
bool write_profile = false;
uint32_t min_iter = 2500;
int source = 0;
int option = 3;
int num_queue=1;
int device = 0;
int rounds = 10;
bool verbose = 0;
int run_bsp_async = 0; // 0 run all variants (bsp, async and discrete), 1 run bsp only, 2 run async only, 3 run discrete only
bool permute = false;
bool ifmesh = false;
if(argc == 1)
{
cout<< "./test -f <file> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> -o <choose queue run launch1 or launch4=launch1> -r <source node to start=0> -q <number of queues used=4> -d <device id=0>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(string(argv[i]) == "-f")
input_file = argv[i+1];
else if(string(argv[i]) == "-s")
start_from_0 = stoi(argv[i+1]);
else if(string(argv[i]) == "-w")
write_profile = stoi(argv[i+1]);
else if(string(argv[i]) == "-i")
min_iter = stoi(argv[i+1]);
else if(string(argv[i]) == "-o")
option = stoi(argv[i+1]);
else if(string(argv[i]) == "-r")
source = stoi(argv[i+1]);
else if(string(argv[i]) == "-q")
num_queue= stoi(argv[i+1]);
else if(string(argv[i]) == "-d")
device= stoi(argv[i+1]);
else if(string(argv[i]) == "-rounds")
rounds = stoi(argv[i+1]);
else if(string(argv[i]) == "-v")
verbose = stoi(argv[i+1]);
else if(string(argv[i]) == "-run_bsp_async")
run_bsp_async = stoi(argv[i+1]);
else if(string(argv[i]) == "-permute")
permute = stoi(argv[i+1]);
else if(string(argv[i]) == "-mesh")
ifmesh = stoi(argv[i+1]);
}
if(input_file == NULL)
{
cout << "input file is needed\n";
cout<< "./test -f <file> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> -o <choose queue run launch1 or launch4=launch1> -r <source node to start=0> -q <number of queues used=4> -d <device id=0>\n";
exit(0);
}
std::cout << "set on device "<< device << std::endl;
CUDA_CHECK(hipSetDevice(device));
int numBlock = 56*5;
int numThread = 256;
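// Fallback launch shape; for options 2-4 the occupancy calculator below overrides these values.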
//if(option == 1)
//hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchThreadPerItem_minIter<int, uint32_t, BFSThread<int,int>, BFS<int, int >> );
if(option == 2)
hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchWarpPer32Items_minIter<int, uint32_t, GCWarp_op<int,int>, GC_Async<int, int>>, 0, 1000);
else if(option == 3) {
if(ifmesh)
hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_mesh<FETCH_SIZE, int,int>, GC_Async<int, int>>, 0, 1000);
else
hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA<FETCH_SIZE,int,int>, GC_Async<int, int>>, 0, 1000);
//hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_simple2<FETCH_SIZE, int,int>, GC_Async<int, int>>, 0, 600);
//numThread=512; numBlock=160;
}
else if(option == 4)
hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_simple<FETCH_SIZE, 1, int,int>, GC_Async<int, int>>, 0, 100);
std::cout << "num of block: " << numBlock << " num of threads per block: "<< numThread << std::endl;
std::cout << "file: "<< input_file << " start from 0: " << start_from_0 << " write profile file: "<< write_profile << " " << numBlock << "x"<< numThread << " min iter "<< min_iter<< std::endl;
//" option "<< option<< " FETCH SIZE " << FETCH_SIZE << " BLOCK SIZE " << BLOCK_SIZE << std::endl;
std::cout << "permute " << permute << std::endl;
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".mtx")
{
std::cout << "generate csr file first\n";
exit(0);
}
else if(str_file.substr(str_file.length()-4) == ".csr")
{
csr.ReadFromBinary(input_file);
}
csr.PrintCsr();
GpuTimer timer;
std::vector<float> times;
std::vector<uint64_t> workloads;
if(run_bsp_async == 0 || run_bsp_async == 1) {
GC_BSP<int, int> gc_bsp(csr);
CUDA_CHECK(hipDeviceSynchronize());
hipStream_t stream;
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
//warm up
//gc_bsp.GCInit(permute, true);
//gc_bsp.GCStart_op(stream);
gc_bsp.GCStart_warp_thread_cta();
for(int iteration=0; iteration < rounds; iteration++) {
gc_bsp.reset();
//gc_bsp.GCInit(permute, false);
timer.Start();
//uint32_t workload = gc_bsp.GCStart_op(stream);
uint32_t workload = gc_bsp.GCStart_warp_thread_cta();
timer.Stop();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
workloads.push_back(workload);
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_bsp);
std::cout << "-----------------------------------------\n\n" << std::endl;
gc_bsp.release();
}
if(run_bsp_async == 0 || run_bsp_async == 2) {
GC_Async<int, int> gc_async(csr, min_iter, num_queue);
times.clear();
workloads.clear();
if(option == 2) {
// warm up
gc_async.GCInit(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart(numBlock, numThread);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit(numBlock, numThread, permute, false);
//gc_async.worklists.print();
timer.Start();
gc_async.GCStart(numBlock, numThread);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
//gc_async.outputVistTimes("chesapeak_warp_freq.txt");
}
else if(option == 3) {
//gc_async.outputNeighborLen("road_ca_neighborlen.txt");
// warm up
gc_async.GCInit_CTA(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart_CTA<FETCH_SIZE>(numBlock, numThread, ifmesh);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_CTA(numBlock, numThread, permute, false);
timer.Start();
gc_async.GCStart_CTA<FETCH_SIZE>(numBlock, numThread, ifmesh);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
//gc_async.outputVistTimes("road_ca_simple_freq.txt");
}
else if(option == 4) {
gc_async.GCInit_CTA(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart_CTA_simple<FETCH_SIZE>(numBlock, numThread);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_CTA(numBlock, numThread, permute, false);
timer.Start();
gc_async.GCStart_CTA_simple<FETCH_SIZE>(numBlock, numThread);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_async);
gc_async.release();
}
if(run_bsp_async == 0 || run_bsp_async == 3) {
GC_Async<int, int> gc_async(csr, min_iter, num_queue);
times.clear();
workloads.clear();
//warm up
gc_async.GCInit_discrete(permute, true);
gc_async.worklists.print();
gc_async.GCStart_discrete<1, 256>();
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_discrete(permute, false);
//gc_async.worklists.print();
timer.Start();
gc_async.GCStart_discrete<1, 256>();
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_async);
gc_async.release();
}
csr.release();
return 0;
}
| 73653a4879056ee18ae6497e810149c4f908f5f3.cu | #include <iostream>
#include <string>
#include <numeric>
//#define ITER_WORKLOAD
#include "../../comm/coo.cuh"
#include "../../comm/csr.cuh"
#include "../../util/time.cuh"
#include "baseline.cuh"
#include "gc.cuh"
#include "validation.cuh"
#define FETCH_SIZE (FETCHSIZE)
using namespace std;
int main(int argc, char *argv[])
{
char *input_file = NULL;
bool start_from_0 = false;
bool write_profile = false;
uint32_t min_iter = 2500;
int source = 0;
int option = 3;
int num_queue=1;
int device = 0;
int rounds = 10;
bool verbose = 0;
int run_bsp_async = 0; // 0 run all variants (bsp, async and discrete), 1 run bsp only, 2 run async only, 3 run discrete only
bool permute = false;
bool ifmesh = false;
if(argc == 1)
{
cout<< "./test -f <file> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> -o <choose queue run launch1 or launch4=launch1> -r <source node to start=0> -q <number of queues used=4> -d <device id=0>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(string(argv[i]) == "-f")
input_file = argv[i+1];
else if(string(argv[i]) == "-s")
start_from_0 = stoi(argv[i+1]);
else if(string(argv[i]) == "-w")
write_profile = stoi(argv[i+1]);
else if(string(argv[i]) == "-i")
min_iter = stoi(argv[i+1]);
else if(string(argv[i]) == "-o")
option = stoi(argv[i+1]);
else if(string(argv[i]) == "-r")
source = stoi(argv[i+1]);
else if(string(argv[i]) == "-q")
num_queue= stoi(argv[i+1]);
else if(string(argv[i]) == "-d")
device= stoi(argv[i+1]);
else if(string(argv[i]) == "-rounds")
rounds = stoi(argv[i+1]);
else if(string(argv[i]) == "-v")
verbose = stoi(argv[i+1]);
else if(string(argv[i]) == "-run_bsp_async")
run_bsp_async = stoi(argv[i+1]);
else if(string(argv[i]) == "-permute")
permute = stoi(argv[i+1]);
else if(string(argv[i]) == "-mesh")
ifmesh = stoi(argv[i+1]);
}
if(input_file == NULL)
{
cout << "input file is needed\n";
cout<< "./test -f <file> -s <file vertex ID start from 0?=false> -w <write profile?=false> -i <min iteration for queue=2500> -o <choose queue run launch1 or launch4=launch1> -r <source node to start=0> -q <number of queues used=4> -d <device id=0>\n";
exit(0);
}
std::cout << "set on device "<< device << std::endl;
CUDA_CHECK(cudaSetDevice(device));
int numBlock = 56*5;
int numThread = 256;
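// Fallback launch shape; for options 2-4 the occupancy calculator below overrides these values.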
//if(option == 1)
//cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchThreadPerItem_minIter<int, uint32_t, BFSThread<int,int>, BFS<int, int >> );
if(option == 2)
cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchWarpPer32Items_minIter<int, uint32_t, GCWarp_op<int,int>, GC_Async<int, int>>, 0, 1000);
else if(option == 3) {
if(ifmesh)
cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_mesh<FETCH_SIZE, int,int>, GC_Async<int, int>>, 0, 1000);
else
cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA<FETCH_SIZE,int,int>, GC_Async<int, int>>, 0, 1000);
//cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_simple2<FETCH_SIZE, int,int>, GC_Async<int, int>>, 0, 600);
//numThread=512; numBlock=160;
}
else if(option == 4)
cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)MaxCountQueue::_launchCTA_minIter<int, uint32_t, FETCH_SIZE, GCCTA_simple<FETCH_SIZE, 1, int,int>, GC_Async<int, int>>, 0, 100);
std::cout << "num of block: " << numBlock << " num of threads per block: "<< numThread << std::endl;
std::cout << "file: "<< input_file << " start from 0: " << start_from_0 << " write profile file: "<< write_profile << " " << numBlock << "x"<< numThread << " min iter "<< min_iter<< std::endl;
//" option "<< option<< " FETCH SIZE " << FETCH_SIZE << " BLOCK SIZE " << BLOCK_SIZE << std::endl;
std::cout << "permute " << permute << std::endl;
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".mtx")
{
std::cout << "generate csr file first\n";
exit(0);
}
else if(str_file.substr(str_file.length()-4) == ".csr")
{
csr.ReadFromBinary(input_file);
}
csr.PrintCsr();
GpuTimer timer;
std::vector<float> times;
std::vector<uint64_t> workloads;
if(run_bsp_async == 0 || run_bsp_async == 1) {
GC_BSP<int, int> gc_bsp(csr);
CUDA_CHECK(cudaDeviceSynchronize());
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
//warm up
//gc_bsp.GCInit(permute, true);
//gc_bsp.GCStart_op(stream);
gc_bsp.GCStart_warp_thread_cta();
for(int iteration=0; iteration < rounds; iteration++) {
gc_bsp.reset();
//gc_bsp.GCInit(permute, false);
timer.Start();
//uint32_t workload = gc_bsp.GCStart_op(stream);
uint32_t workload = gc_bsp.GCStart_warp_thread_cta();
timer.Stop();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
workloads.push_back(workload);
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_bsp);
std::cout << "-----------------------------------------\n\n" << std::endl;
gc_bsp.release();
}
if(run_bsp_async == 0 || run_bsp_async == 2) {
GC_Async<int, int> gc_async(csr, min_iter, num_queue);
times.clear();
workloads.clear();
if(option == 2) {
// warm up
gc_async.GCInit(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart(numBlock, numThread);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit(numBlock, numThread, permute, false);
//gc_async.worklists.print();
timer.Start();
gc_async.GCStart(numBlock, numThread);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
//gc_async.outputVistTimes("chesapeak_warp_freq.txt");
}
else if(option == 3) {
//gc_async.outputNeighborLen("road_ca_neighborlen.txt");
// warm up
gc_async.GCInit_CTA(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart_CTA<FETCH_SIZE>(numBlock, numThread, ifmesh);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_CTA(numBlock, numThread, permute, false);
timer.Start();
gc_async.GCStart_CTA<FETCH_SIZE>(numBlock, numThread, ifmesh);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
//gc_async.outputVistTimes("road_ca_simple_freq.txt");
}
else if(option == 4) {
gc_async.GCInit_CTA(numBlock, numThread, permute, true);
gc_async.worklists.print();
gc_async.GCStart_CTA_simple<FETCH_SIZE>(numBlock, numThread);
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_CTA(numBlock, numThread, permute, false);
timer.Start();
gc_async.GCStart_CTA_simple<FETCH_SIZE>(numBlock, numThread);
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_async);
gc_async.release();
}
if(run_bsp_async == 0 || run_bsp_async == 3) {
GC_Async<int, int> gc_async(csr, min_iter, num_queue);
times.clear();
workloads.clear();
//warm up
gc_async.GCInit_discrete(permute, true);
gc_async.worklists.print();
gc_async.GCStart_discrete<1, 256>();
for(int iteration=0; iteration < rounds; iteration++) {
gc_async.reset();
gc_async.GCInit_discrete(permute, false);
//gc_async.worklists.print();
timer.Start();
gc_async.GCStart_discrete<1, 256>();
timer.Stop();
gc_async.worklists.print();
float elapsed = timer.ElapsedMillis();
std::cout << "Time: " << elapsed << std::endl;
times.push_back(elapsed);
uint32_t workload = gc_async.getWorkload();
workloads.push_back(workload);
}
if(times.size() > 0) {
std::cout << "Ave. Time: "<< std::accumulate(times.begin(), times.end(), float(0.0))/times.size() << std::endl;
std::cout << "Ave. Workload(vertices): "<< std::accumulate(workloads.begin(), workloads.end(), (uint64_t)0)/workloads.size() << std::endl;
}
GCValid<int, int>(gc_async);
gc_async.release();
}
csr.release();
return 0;
}
|
b18a44a281bac8bcbb80dbdea718bbe9d54b9671.hip | // !!! This is a file automatically generated by hipify!!!
// Brute-force Key Search - TEA Encryption with 31-bit Key
// MP4, Spring 2016, GPU Programming @ Auburn University
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/* Data to test with (this should be easy to change) */
const uint32_t orig_data[2] = { 0xDEADBEEF, 0x0BADF00D };
const uint32_t encrypted[2] = { 0xFF305F9B, 0xB9BDCECE };
void encrypt(uint32_t *data, const uint32_t *key) {
uint32_t v0=data[0], v1=data[1], sum=0, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0=key[0], k1=key[1], k2=key[2], k3=key[3]; /* cache key */
for (i=0; i < 32; i++) { /* basic cycle start */
sum += delta;
v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
} /* end cycle */
data[0]=v0; data[1]=v1;
}
__global__ static void encryptGpu(uint32_t *data, uint32_t *key, uint32_t *key_out) {
/* Try every possible 28-bit integer... */
const uint32_t encrypted[2] = { 0xFF305F9B, 0xB9BDCECE};
    uint32_t k = blockIdx.x * blockDim.x + threadIdx.x;
if (k <= 0x0FFFFFFF) {
uint32_t v0 = data[0], v1 = data[1], sum = 0, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0 = k + key[0] * 2048 * 1024, k1 = k + key[1] * 2048 * 1024, k2 = k + key[2] * 2048 * 1024, k3 = k + key[3] * 2048 * 1024;
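    /* As launched from main (2048 blocks x 1024 threads), each call sweeps one
       2048*1024 = 2^21 slice of k; the host loop supplies n = 0..127 through key[],
       so k + n*2048*1024 covers all 2^28 candidate keys. */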
for (i=0; i < 32; i++) { /* basic cycle start */
sum += delta;
v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
} /* end cycle */
/* Did we get the correct encrypted values? */
if (v0 == encrypted[0] && v1 == encrypted[1]) {
key_out[0] = k0; key_out[1] = k1; key_out[2] = k2; key_out[3] = k3;
}
}
}
void decrypt(uint32_t *data, const uint32_t *key) {
uint32_t v0=data[0], v1=data[1], sum=0xC6EF3720, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0=key[0], k1=key[1], k2=key[2], k3=key[3]; /* cache key */
for (i=0; i<32; i++) { /* basic cycle start */
v1 -= ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
v0 -= ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
sum -= delta;
} /* end cycle */
data[0]=v0; data[1]=v1;
}
int main() {
size_t sizeKey = 4 * sizeof(uint32_t);
size_t sizeData = 2 * sizeof(uint32_t);
uint32_t *key = (uint32_t *)malloc(sizeKey);
uint32_t *data = (uint32_t *)malloc(sizeData);
printf("Starting (this may take a while)...\n");
double start = omp_get_wtime();
/////////////////////////////////////////////////////////////////
uint32_t *d_key, *d_data, *d_keyout;
hipMalloc((void **)&d_key, sizeKey);
hipMalloc((void **)&d_data, sizeData);
hipMalloc((void **)&d_keyout, sizeKey);
for (uint32_t n = 0; n < 128; n++) {
key[0] = key[1] = key[2] = key[3] = n;
hipMemcpy(d_key, key, sizeKey, hipMemcpyHostToDevice);
hipMemcpy(d_data, orig_data, sizeData, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( encryptGpu), dim3(2048), dim3(1024), 0, 0, d_data, d_key, d_keyout);
}
hipDeviceSynchronize();
hipMemcpy(key, d_keyout, sizeKey, hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_key);
////////////////////////////////////////////////////////////////
printf("Elapsed time: %f seconds\n", omp_get_wtime() - start);
/* Assume the above loop will find a key */
printf("Found key: (hexadecimal) %08x %08x %08x %08x\n", key[0], key[1], key[2], key[3]);
data[0] = orig_data[0];
data[1] = orig_data[1];
printf("The original values are (hexadecimal): %08x %08x\n", data[0], data[1]);
encrypt(data, key);
printf("The encrypted values are (hexadecimal): %08x %08x\n", data[0], data[1]);
printf("They should be: %08x %08x\n", encrypted[0], encrypted[1]);
if (data[0] == encrypted[0] && data[1] == encrypted[1]) {
printf("SUCCESS!\n");
return 0;
} else {
printf("FAILED\n");
return 1;
}
} | b18a44a281bac8bcbb80dbdea718bbe9d54b9671.cu | // Brute-force Key Search - TEA Encryption with 31-bit Key
// MP4, Spring 2016, GPU Programming @ Auburn University
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
/* Data to test with (this should be easy to change) */
const uint32_t orig_data[2] = { 0xDEADBEEF, 0x0BADF00D };
const uint32_t encrypted[2] = { 0xFF305F9B, 0xB9BDCECE };
void encrypt(uint32_t *data, const uint32_t *key) {
uint32_t v0=data[0], v1=data[1], sum=0, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0=key[0], k1=key[1], k2=key[2], k3=key[3]; /* cache key */
for (i=0; i < 32; i++) { /* basic cycle start */
sum += delta;
v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
} /* end cycle */
data[0]=v0; data[1]=v1;
}
__global__ static void encryptGpu(uint32_t *data, uint32_t *key, uint32_t *key_out) {
/* Try every possible 28-bit integer... */
const uint32_t encrypted[2] = { 0xFF305F9B, 0xB9BDCECE};
    uint32_t k = blockIdx.x * blockDim.x + threadIdx.x;
if (k <= 0x0FFFFFFF) {
uint32_t v0 = data[0], v1 = data[1], sum = 0, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0 = k + key[0] * 2048 * 1024, k1 = k + key[1] * 2048 * 1024, k2 = k + key[2] * 2048 * 1024, k3 = k + key[3] * 2048 * 1024;
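    /* As launched from main (2048 blocks x 1024 threads), each call sweeps one
       2048*1024 = 2^21 slice of k; the host loop supplies n = 0..127 through key[],
       so k + n*2048*1024 covers all 2^28 candidate keys. */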
for (i=0; i < 32; i++) { /* basic cycle start */
sum += delta;
v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
} /* end cycle */
/* Did we get the correct encrypted values? */
if (v0 == encrypted[0] && v1 == encrypted[1]) {
key_out[0] = k0; key_out[1] = k1; key_out[2] = k2; key_out[3] = k3;
}
}
}
void decrypt(uint32_t *data, const uint32_t *key) {
uint32_t v0=data[0], v1=data[1], sum=0xC6EF3720, i; /* set up */
uint32_t delta=0x9e3779b9; /* a key schedule constant */
uint32_t k0=key[0], k1=key[1], k2=key[2], k3=key[3]; /* cache key */
for (i=0; i<32; i++) { /* basic cycle start */
v1 -= ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
v0 -= ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
sum -= delta;
} /* end cycle */
data[0]=v0; data[1]=v1;
}
int main() {
size_t sizeKey = 4 * sizeof(uint32_t);
size_t sizeData = 2 * sizeof(uint32_t);
uint32_t *key = (uint32_t *)malloc(sizeKey);
uint32_t *data = (uint32_t *)malloc(sizeData);
printf("Starting (this may take a while)...\n");
double start = omp_get_wtime();
/////////////////////////////////////////////////////////////////
uint32_t *d_key, *d_data, *d_keyout;
cudaMalloc((void **)&d_key, sizeKey);
cudaMalloc((void **)&d_data, sizeData);
cudaMalloc((void **)&d_keyout, sizeKey);
for (uint32_t n = 0; n < 128; n++) {
key[0] = key[1] = key[2] = key[3] = n;
cudaMemcpy(d_key, key, sizeKey, cudaMemcpyHostToDevice);
cudaMemcpy(d_data, orig_data, sizeData, cudaMemcpyHostToDevice);
encryptGpu<<<2048, 1024>>>(d_data, d_key, d_keyout);
}
cudaDeviceSynchronize();
cudaMemcpy(key, d_keyout, sizeKey, cudaMemcpyDeviceToHost);
cudaFree(d_data);
cudaFree(d_key);
////////////////////////////////////////////////////////////////
printf("Elapsed time: %f seconds\n", omp_get_wtime() - start);
/* Assume the above loop will find a key */
printf("Found key: (hexadecimal) %08x %08x %08x %08x\n", key[0], key[1], key[2], key[3]);
data[0] = orig_data[0];
data[1] = orig_data[1];
printf("The original values are (hexadecimal): %08x %08x\n", data[0], data[1]);
encrypt(data, key);
printf("The encrypted values are (hexadecimal): %08x %08x\n", data[0], data[1]);
printf("They should be: %08x %08x\n", encrypted[0], encrypted[1]);
if (data[0] == encrypted[0] && data[1] == encrypted[1]) {
printf("SUCCESS!\n");
return 0;
} else {
printf("FAILED\n");
return 1;
}
} |
3a537ffb27ca4cee72babe389d765408199dbdbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = threadIdx.x;
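    // main() launches a single block of nElem threads (grid(1), block(nElem)), so threadIdx.x alone indexes the whole vector.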
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int dev = 0;
CHECK(hipSetDevice(dev));
int nElem = 1 << 5;
printf("Vector size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
dim3 block (nElem);
dim3 grid (1);
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
sumArraysOnHost(h_A, h_B, hostRef, nElem);
checkResult(hostRef, gpuRef, nElem);
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(hipDeviceReset());
return(0);
}
| 3a537ffb27ca4cee72babe389d765408199dbdbf.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = threadIdx.x;
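    // main() launches a single block of nElem threads (grid(1), block(nElem)), so threadIdx.x alone indexes the whole vector.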
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int dev = 0;
CHECK(cudaSetDevice(dev));
int nElem = 1 << 5;
printf("Vector size %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
dim3 block (nElem);
dim3 grid (1);
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
printf("Execution configure <<<%d, %d>>>\n", grid.x, block.x);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
sumArraysOnHost(h_A, h_B, hostRef, nElem);
checkResult(hostRef, gpuRef, nElem);
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(cudaDeviceReset());
return(0);
}
|
2a7e6c4b31d08cac8e4f129f5b396083f98474e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
__global__ void enforceLU( double *matrix, int lda )
{
int i = threadIdx.x;
int j = blockIdx.x;
if( i <= j )
matrix[i + j*lda] = (i == j) ? 1 : 0;
}
}
// zeros out the whole part of matrix above the diagonal (not just a block)
extern "C" {
__global__ void zerosU(int m, int n, double *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if (i < j)
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
}
// zeros out the whole part of matrix below the diagonal
extern "C" {
__global__ void zerosL(int m, int n, double *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if( i > j )
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
} | 2a7e6c4b31d08cac8e4f129f5b396083f98474e0.cu |
extern "C" {
__global__ void enforceLU( double *matrix, int lda )
{
int i = threadIdx.x;
int j = blockIdx.x;
if( i <= j )
matrix[i + j*lda] = (i == j) ? 1 : 0;
}
}
// zeros out the whole part of matrix above the diagonal (not just a block)
extern "C" {
__global__ void zerosU(int m, int n, double *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if (i < j)
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
}
// zeros out the whole part of matrix below the diagonal
extern "C" {
__global__ void zerosL(int m, int n, double *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if( i > j )
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
} |
d7d621a529fd1a2d1cd82f19d6725e98c4e1994f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CIS565 CUDA Checker: A simple CUDA hello-world style program for
Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
Written by Yining Karl Li, Liam Boone, and Harmony Li.
Copyright (c) 2014 University of Pennsylvania */
#include <stdio.h>
#include <iostream>
#include "kernel.h"
void CheckCUDAError(const char *msg){
hipError_t err = hipGetLastError();
if(hipSuccess != err){
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err));
exit(EXIT_FAILURE);
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void CreateVersionVisualization(uchar4* PBOpos, int width, int height, int major,
int minor){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * width);
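    // Rows with y < height/2 encode the major compute capability (1=red, 2=green, 3=blue);
    // the remaining rows encode the minor version with a second colour code.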
if(x<=width && y<=height){
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = 0;
PBOpos[index].y = 0;
PBOpos[index].z = 0;
if(y<height/2){
if(major==1){
PBOpos[index].x = 255;
}else if(major==2){
PBOpos[index].y = 255;
}else if(major==3){
PBOpos[index].z = 255;
}
}else{
if(minor==0){
PBOpos[index].x = 255;
}else if(minor==1){
PBOpos[index].y = 255;
}else if(minor==2){
PBOpos[index].z = 255;
}else if(minor==3){
PBOpos[index].x = 255;
PBOpos[index].y = 255;
}else if(minor==5){
PBOpos[index].z = 255;
PBOpos[index].y = 255;
}
}
}
}
// Wrapper for the __global__ call that sets up the kernel calls
void CudaKernel(uchar4* PBOpos, int width, int height, int major, int minor){
// set up crucial magic
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(width/float(tileSize)), (int)ceil(height/float(tileSize)));
//kernel launches
hipLaunchKernelGGL(( CreateVersionVisualization), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, width, height,
major, minor);
// make certain the kernel has completed
hipDeviceSynchronize();
CheckCUDAError("Kernel failed!");
}
| d7d621a529fd1a2d1cd82f19d6725e98c4e1994f.cu | /* CIS565 CUDA Checker: A simple CUDA hello-world style program for
Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
Written by Yining Karl Li, Liam Boone, and Harmony Li.
Copyright (c) 2014 University of Pennsylvania */
#include <stdio.h>
#include <iostream>
#include "kernel.h"
void CheckCUDAError(const char *msg){
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err){
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void CreateVersionVisualization(uchar4* PBOpos, int width, int height, int major,
int minor){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * width);
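    // Rows with y < height/2 encode the major compute capability (1=red, 2=green, 3=blue);
    // the remaining rows encode the minor version with a second colour code.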
if(x<=width && y<=height){
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = 0;
PBOpos[index].y = 0;
PBOpos[index].z = 0;
if(y<height/2){
if(major==1){
PBOpos[index].x = 255;
}else if(major==2){
PBOpos[index].y = 255;
}else if(major==3){
PBOpos[index].z = 255;
}
}else{
if(minor==0){
PBOpos[index].x = 255;
}else if(minor==1){
PBOpos[index].y = 255;
}else if(minor==2){
PBOpos[index].z = 255;
}else if(minor==3){
PBOpos[index].x = 255;
PBOpos[index].y = 255;
}else if(minor==5){
PBOpos[index].z = 255;
PBOpos[index].y = 255;
}
}
}
}
// Wrapper for the __global__ call that sets up the kernel calls
void CudaKernel(uchar4* PBOpos, int width, int height, int major, int minor){
// set up crucial magic
int tileSize = 16;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(width/float(tileSize)), (int)ceil(height/float(tileSize)));
//kernel launches
CreateVersionVisualization<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, width, height,
major, minor);
// make certain the kernel has completed
cudaThreadSynchronize();
CheckCUDAError("Kernel failed!");
}
|
a028545a298d24d618fdd369f8e0fce841ddbe60.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE*sizeof(float));
int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,numElements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sum), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a028545a298d24d618fdd369f8e0fce841ddbe60.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE*sizeof(float));
int numElements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sum<<<gridBlock,threadBlock>>>(input,output,numElements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sum<<<gridBlock,threadBlock>>>(input,output,numElements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sum<<<gridBlock,threadBlock>>>(input,output,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bdc34b66a94b5738ffe8bdc0ba3e3848e856d075.hip | // !!! This is a file automatically generated by hipify!!!
/*Function to update the the solution!*/
/*------------------ Library Dependencies --------------------------------*/
#include <hip/hip_runtime.h>
#include <cmath>
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
/*-----------------------Function Dependencies!----------------------*/
#include "primconsflux.cuh"
/* gr_ngc, GRID_SIZE, dtx (the precomputed dt/dx factor) and cons2prim are assumed to be provided by the included headers. */
__global__ void soln_update(double3 *U, double3 *V, const double3 *Flux, const double dt)
{
  int tid = threadIdx.x + blockIdx.x*blockDim.x + gr_ngc;
  double3 temp;
  if(tid < GRID_SIZE - gr_ngc) {
    /* finite-volume update: subtract the flux difference across the cell */
    temp.x = U[tid].x - dtx*(Flux[tid+1].x - Flux[tid].x);
    temp.y = U[tid].y - dtx*(Flux[tid+1].y - Flux[tid].y);
    temp.z = U[tid].z - dtx*(Flux[tid+1].z - Flux[tid].z);
U[tid] = temp;
__syncthreads();
cons2prim(U[tid], V[tid]);
}
}
| bdc34b66a94b5738ffe8bdc0ba3e3848e856d075.cu | /*Function to update the the solution!*/
/*------------------ Library Dependencies --------------------------------*/
#include <cuda.h>
#include <cmath>
#include "cuda_runtime.h"
#include <device_functions.h>
/*-----------------------Function Dependencies!----------------------*/
#include "primconsflux.cuh"
/* gr_ngc, GRID_SIZE, dtx (the precomputed dt/dx factor) and cons2prim are assumed to be provided by the included headers. */
__global__ void soln_update(double3 *U, double3 *V, const double3 *Flux, const double dt)
{
  int tid = threadIdx.x + blockIdx.x*blockDim.x + gr_ngc;
  double3 temp;
  if(tid < GRID_SIZE - gr_ngc) {
    /* finite-volume update: subtract the flux difference across the cell */
    temp.x = U[tid].x - dtx*(Flux[tid+1].x - Flux[tid].x);
    temp.y = U[tid].y - dtx*(Flux[tid+1].y - Flux[tid].y);
    temp.z = U[tid].z - dtx*(Flux[tid+1].z - Flux[tid].z);
U[tid] = temp;
__syncthreads();
cons2prim(U[tid], V[tid]);
}
}
|
16a35671b85404745da528faa508fed2f7f1f934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nms_cuda.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
typedef unsigned long long MaskType;
const long numThreadsPerBlock = sizeof(MaskType) * 8;
__device__ inline float iou(const float *bbox1, const float *bbox2) {
float intersectionLeft = max(bbox1[0], bbox2[0]);
float intersectionTop = max(bbox1[1], bbox2[1]);
float intersectionRight = min(bbox1[2], bbox2[2]);
float intersectionBottom = min(bbox1[3], bbox2[3]);
float intersectionWidth = max(intersectionRight - intersectionLeft, 0.f);
float intersectionHeight = max(intersectionBottom - intersectionTop, 0.f);
float intersectionArea = intersectionWidth * intersectionHeight;
float bbox1Area = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]);
float bbox2Area = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]);
return intersectionArea / (bbox1Area + bbox2Area - intersectionArea);
}
__global__ void nms_kernel(const float *bboxes, long numBoxes, float threshold, MaskType *suppressionMask) {
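    // One thread per "current" box: each block compares a 64-box row tile (bidY) against a
    // 64-box column tile (bidX), and each thread records as a 64-bit mask which column boxes
    // its current box suppresses (IoU above threshold, higher-indexed boxes only).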
int i;
int bidX = blockIdx.x;
int bidY = blockIdx.y;
int tid = threadIdx.x;
const long blockBoxStartX = bidX * numThreadsPerBlock;
const long blockBoxStartY = bidY * numThreadsPerBlock;
const long blockBoxEndX = min(blockBoxStartX + numThreadsPerBlock, numBoxes);
const long blockBoxEndY = min(blockBoxStartY + numThreadsPerBlock, numBoxes);
const long currentBoxY = blockBoxStartY + tid;
if (currentBoxY < blockBoxEndY) {
MaskType suppression = 0;
const float *currentBox = bboxes + currentBoxY * 4;
for (i = 0; i < blockBoxEndX - blockBoxStartX; ++i) {
long targetBoxX = blockBoxStartX + i;
if (targetBoxX > currentBoxY) {
const float *targetBox = bboxes + targetBoxX * 4;
if (iou(currentBox, targetBox) > threshold) {
suppression |= 1ULL << i;
}
}
}
const long numBlockCols = DIVUP(numBoxes, numThreadsPerBlock);
suppressionMask[currentBoxY * numBlockCols + bidX] = suppression;
}
}
void nms(const float *bboxesInDevice, long numBoxes, float threshold, long *keepIndices, long *numKeepBoxes) {
int i, j;
const long numBlockCols = DIVUP(numBoxes, numThreadsPerBlock);
MaskType *suppressionMaskInDevice;
hipMalloc(&suppressionMaskInDevice, sizeof(MaskType) * numBoxes * numBlockCols);
dim3 blocks(numBlockCols, numBlockCols);
dim3 threads(numThreadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, bboxesInDevice, numBoxes, threshold, suppressionMaskInDevice);
MaskType *suppressionMask = (MaskType *) malloc(sizeof(MaskType) * numBoxes * numBlockCols);
hipMemcpy(suppressionMask, suppressionMaskInDevice, sizeof(MaskType) * numBoxes * numBlockCols, hipMemcpyDeviceToHost);
MaskType *maskRow = (MaskType *) malloc(sizeof(MaskType) * numBlockCols);
memset(maskRow, 0, sizeof(MaskType) * numBlockCols);
long nKeepBoxes = 0;
for (i = 0; i < numBoxes; ++i) {
long block = i / numThreadsPerBlock;
long offset = i % numThreadsPerBlock;
if (!(maskRow[block] & (1ULL << offset))) {
keepIndices[nKeepBoxes++] = i;
for (j = 0; j < numBlockCols; ++j) {
maskRow[j] |= suppressionMask[i * numBlockCols + j];
}
}
}
*numKeepBoxes = nKeepBoxes;
hipFree(suppressionMaskInDevice);
free(suppressionMask);
free(maskRow);
}
| 16a35671b85404745da528faa508fed2f7f1f934.cu | #include "nms_cuda.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
typedef unsigned long long MaskType;
const long numThreadsPerBlock = sizeof(MaskType) * 8;
__device__ inline float iou(const float *bbox1, const float *bbox2) {
float intersectionLeft = max(bbox1[0], bbox2[0]);
float intersectionTop = max(bbox1[1], bbox2[1]);
float intersectionRight = min(bbox1[2], bbox2[2]);
float intersectionBottom = min(bbox1[3], bbox2[3]);
float intersectionWidth = max(intersectionRight - intersectionLeft, 0.f);
float intersectionHeight = max(intersectionBottom - intersectionTop, 0.f);
float intersectionArea = intersectionWidth * intersectionHeight;
float bbox1Area = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]);
float bbox2Area = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]);
return intersectionArea / (bbox1Area + bbox2Area - intersectionArea);
}
__global__ void nms_kernel(const float *bboxes, long numBoxes, float threshold, MaskType *suppressionMask) {
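    // One thread per "current" box: each block compares a 64-box row tile (bidY) against a
    // 64-box column tile (bidX), and each thread records as a 64-bit mask which column boxes
    // its current box suppresses (IoU above threshold, higher-indexed boxes only).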
int i;
int bidX = blockIdx.x;
int bidY = blockIdx.y;
int tid = threadIdx.x;
const long blockBoxStartX = bidX * numThreadsPerBlock;
const long blockBoxStartY = bidY * numThreadsPerBlock;
const long blockBoxEndX = min(blockBoxStartX + numThreadsPerBlock, numBoxes);
const long blockBoxEndY = min(blockBoxStartY + numThreadsPerBlock, numBoxes);
const long currentBoxY = blockBoxStartY + tid;
if (currentBoxY < blockBoxEndY) {
MaskType suppression = 0;
const float *currentBox = bboxes + currentBoxY * 4;
for (i = 0; i < blockBoxEndX - blockBoxStartX; ++i) {
long targetBoxX = blockBoxStartX + i;
if (targetBoxX > currentBoxY) {
const float *targetBox = bboxes + targetBoxX * 4;
if (iou(currentBox, targetBox) > threshold) {
suppression |= 1ULL << i;
}
}
}
const long numBlockCols = DIVUP(numBoxes, numThreadsPerBlock);
suppressionMask[currentBoxY * numBlockCols + bidX] = suppression;
}
}
void nms(const float *bboxesInDevice, long numBoxes, float threshold, long *keepIndices, long *numKeepBoxes) {
int i, j;
const long numBlockCols = DIVUP(numBoxes, numThreadsPerBlock);
MaskType *suppressionMaskInDevice;
cudaMalloc(&suppressionMaskInDevice, sizeof(MaskType) * numBoxes * numBlockCols);
dim3 blocks(numBlockCols, numBlockCols);
dim3 threads(numThreadsPerBlock);
nms_kernel<<<blocks, threads>>>(bboxesInDevice, numBoxes, threshold, suppressionMaskInDevice);
MaskType *suppressionMask = (MaskType *) malloc(sizeof(MaskType) * numBoxes * numBlockCols);
cudaMemcpy(suppressionMask, suppressionMaskInDevice, sizeof(MaskType) * numBoxes * numBlockCols, cudaMemcpyDeviceToHost);
MaskType *maskRow = (MaskType *) malloc(sizeof(MaskType) * numBlockCols);
memset(maskRow, 0, sizeof(MaskType) * numBlockCols);
long nKeepBoxes = 0;
for (i = 0; i < numBoxes; ++i) {
long block = i / numThreadsPerBlock;
long offset = i % numThreadsPerBlock;
if (!(maskRow[block] & (1ULL << offset))) {
keepIndices[nKeepBoxes++] = i;
for (j = 0; j < numBlockCols; ++j) {
maskRow[j] |= suppressionMask[i * numBlockCols + j];
}
}
}
*numKeepBoxes = nKeepBoxes;
cudaFree(suppressionMaskInDevice);
free(suppressionMask);
free(maskRow);
}
|
93c07d6850ce206c28b596ea672caf43630176c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
int val = 0;
int i_x = blockIdx.x * blockDim.x + threadIdx.x;
int i_y = blockIdx.y * blockDim.y + threadIdx.y;
int s_x = gridDim.x * blockDim.x;
int s_y = gridDim.y * blockDim.y;
for( int row = i_x; row < N; row+=s_x )
for( int col = i_y; col < N; col+=s_y )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
int ind = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
//if (ind < 5){
//printf("c[row:%d * N:%d + col:%d] = val:%d\n", row,N,col,val);
//}
ind++;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
  dim3 threads_per_block(16, 16, 1); // 256 threads per block, within the 1024-thread limit
  dim3 number_of_blocks(4, 4, 1);    // 4x4 blocks of 16x16 threads tile the 64x64 matrices
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d], values: c_cpu:%d and c_gpu:%d\n",
row, col,
c_cpu[row * N + col],
c_gpu[row * N + col]
);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
}
| 93c07d6850ce206c28b596ea672caf43630176c4.cu | #include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
int val = 0;
int i_x = blockIdx.x * blockDim.x + threadIdx.x;
int i_y = blockIdx.y * blockDim.y + threadIdx.y;
int s_x = gridDim.x * blockDim.x;
int s_y = gridDim.y * blockDim.y;
for( int row = i_x; row < N; row+=s_x )
for( int col = i_y; col < N; col+=s_y )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
int ind = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
//if (ind < 5){
//printf("c[row:%d * N:%d + col:%d] = val:%d\n", row,N,col,val);
//}
ind++;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
  dim3 threads_per_block(16, 16, 1); // 256 threads per block, within the 1024-thread limit
  dim3 number_of_blocks(4, 4, 1);    // 4x4 blocks of 16x16 threads tile the 64x64 matrices
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
cudaDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d], values: c_cpu:%d and c_gpu:%d\n",
row, col,
c_cpu[row * N + col],
c_gpu[row * N + col]
);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
}
|
0d3c4e0932d72e5dfc3286a460eeb05e3ec4b595.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gdata = NULL;
hipMalloc(&gdata, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
size_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
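// Round the launch extents up to multiples of the block dimensions so the
// grid of blocks fully covers the XSIZE x YSIZE problem.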
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, gdata,out,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, gdata,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
reduce), dim3(gridBlock),dim3(threadBlock), 0, 0, gdata,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0d3c4e0932d72e5dfc3286a460eeb05e3ec4b595.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gdata = NULL;
cudaMalloc(&gdata, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
size_t n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduce<<<gridBlock,threadBlock>>>(gdata,out,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduce<<<gridBlock,threadBlock>>>(gdata,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduce<<<gridBlock,threadBlock>>>(gdata,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
186002d80d620904a4c712720028f11f61543cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Inter-block reduction.
//
// Function gridReduce performs point-wise reductions of scalars across thread
// blocks. Thread blocks are disjointly partitioned into groups of thread
// blocks, "reduction segments," that are collectively defined by boolean
// template parameters, X_BLOCK, Y_BLOCK and Z_BLOCK. Each of X/Y/Z_BLOCK
// determines whether thread blocks along the dimension should be grouped into
// the same reduction segment. Cross-block reductions are done independently
// within each segment and generate distinct results per segment. For
// instance, if all of X/Y/Z_BLOCK are true, reductions will be done across all
// thread blocks since there will be just a single segment consisting of all
// thread blocks. If none of them are true, each thread block will become a
// segment by itself, so no reduction will be performed.
//
// The input scalars to reduce within each segment are a certain subset of
// thread-private scalars provided as part of the gridReduce function
// parameters. Boolean template parameters, X_THREAD, Y_THREAD and Z_THREAD,
// determine which subset of the scalars should be used for inter-block
// reductions. Specifically, all the input scalars of threads along each
// dimension will be used when X/Y/Z_THREAD are true. Otherwise, only the value
// held at offset 0 of each dimension will be used. Thus, for example, if all of
// X/Y/Z_THREAD are true, the scalars of all threads in each block will
// participate in inter-block reductions. If all of them are false, only one
// scalar of the thread at threadIdx.x == threadIdx.y == threadIdx.z == 0 will
// be used. In the code below, we call the subset of threads a "reduction
// block."
//
// Inter-block reductions perform point-wise reductions of scalars of reduction
// blocks within each reduction segment. More specifically, let rb be a
// reduction block and rs be a reduction segment. Let IN(thread_idx, block_idx)
// denote the input scalar of thread at thread_idx and block_idx. The result of
// each reduction segment, OUT(thread_idx, block_idx_out), is defined only for
// each thread_idx in thread block block_idx_out in the segment as follows:
//
// OUT(thread_idx, block_idx_out) =
// Reduction of IN(thread_idx, block_idx) for
// all block_idx in a reduction segment
//
// OUT is not given for all threads that are not in block_idx_out and the
// reduction block.
//
// See also the function comment of gridReduce.
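//
// For example, with gridDim = {4, 2, 1}, X/Y/Z_BLOCK = {true, false, false} and
// X/Y/Z_THREAD = {true, false, false}, there are gridDim.y * gridDim.z = 2
// reduction segments, each made of the 4 blocks sharing a blockIdx.y. Within a
// segment, the values of threads with threadIdx.y == threadIdx.z == 0 are
// reduced point-wise across those 4 blocks, and one (unspecified) block of the
// segment ends up holding OUT for each threadIdx.x.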
namespace reduction {
// Utility functions
template <typename _dim3>
__device__ __forceinline__ size_t size(const _dim3& d) {
return (size_t)d.x * (size_t)d.y * (size_t)d.z;
}
#define isize(d) d.x* d.y* d.z
template <typename _dim3pos, typename _dim3dim>
__device__ __forceinline__ size_t
offset(const _dim3pos& pos, const _dim3dim& dim) {
return (size_t)pos.x + (size_t)pos.y * (size_t)dim.x +
(size_t)pos.z * (size_t)dim.x * (size_t)dim.y;
}
#define ioffset(pos, dim) pos.x + pos.y* dim.x + pos.z* dim.x* dim.y
// Returns dim3 of each reduction segment.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) {
return dim3{
X_BLOCK ? grid_dim.x : 1,
Y_BLOCK ? grid_dim.y : 1,
Z_BLOCK ? grid_dim.z : 1};
}
// Returns the number of blocks in each reduction segment.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ size_t size_of_reduction_segment(const _dim3& grid_dim) {
return size(
dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim));
}
// Returns the total number of reduction segments.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ size_t number_of_reduction_segments(const _dim3& grid_dim) {
return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) *
(Z_BLOCK ? 1 : grid_dim.z);
}
// Returns the 1-D index of the segment of thread block of block_idx.
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
typename _dim3bi,
typename _dim3gd>
__device__ size_t
index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) {
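 // E.g., with X_BLOCK = true and Y/Z_BLOCK = false this reduces to
 // blockIdx.z * gridDim.y + blockIdx.y.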
size_t seg_idx = 0;
if (!Z_BLOCK)
seg_idx += block_idx.z;
if (!Y_BLOCK)
seg_idx = seg_idx * grid_dim.y + block_idx.y;
if (!X_BLOCK)
seg_idx = seg_idx * grid_dim.x + block_idx.x;
return seg_idx;
}
// Returns the offset of thread block in its reduction segment.
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
typename _dim3bi,
typename _dim3gd>
__device__ size_t
offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) {
size_t offset = 0;
if (Z_BLOCK)
offset = offset * grid_dim.z + block_idx.z;
if (Y_BLOCK)
offset = offset * grid_dim.y + block_idx.y;
if (X_BLOCK)
offset = offset * grid_dim.x + block_idx.x;
return offset;
}
// Returns dim3 of each reduction block.
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3>
__device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) {
return dim3{
X_THREAD ? block_dim.x : 1,
Y_THREAD ? block_dim.y : 1,
Z_THREAD ? block_dim.z : 1};
}
// Returns the number of threads of each reduction block.
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3>
__device__ int size_of_reduction_block(const _dim3& block_dim) {
auto tmp_dim =
dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim);
return isize(tmp_dim);
}
// Returns the linear offset of a thread in a reduction block.
template <
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename _dim3ti,
typename _dim3bd>
__device__ int offset_in_reduction_block(
const _dim3ti& thread_idx,
const _dim3bd& block_dim) {
int offset = 0;
if (Z_THREAD)
offset += thread_idx.z;
if (Y_THREAD)
offset = offset * block_dim.y + thread_idx.y;
if (X_THREAD)
offset = offset * block_dim.x + thread_idx.x;
return offset;
}
// Reduces all the reduction blocks in each reduction segment.
//
// This is only used by one thread block per reduction segment. The input
// reduction blocks of the segment are stored in an intermediate buffer pointed
// by parameter in. Template parameters X/Y/Z_THREAD denote how the reduction
// block is formed.
//
// The size of a reduction block is by definition smaller or equal to the size
// of a thread block. We use the remaining threads to parallelize reductions
// across reduction blocks. For example, when X/Y/Z_THREAD = {true, false,
// false}, we use blockDim.y*blockDim.z threads for each output value. This is
// done first by loading the input values in parallel and then by reducing
// across threads of dimensions whose XYZ_THREAD are false.
//
// Note that what is done here after the loading from global memory is similar
// to what the existing blockReduce function does. The main difference is that
// the logical block to reduce is a 2D domain where the leading dimension is the
// size of a reduction block and the second dimension is the remaining factor in
// each thread block. For example, when X/Y/Z_THREAD = {false, true, false}, the
// threads are arranged as (blockDim.y, blockDim.x*blockDim.z). We do not reduce
// along the first dimension but only the second dimension. So, it is possible
// to reuse the existing blockReduce with dim3{blockDim.y,
// blockDim.x*blockDim.z} instead of blockDim and with X_THREAD and Y_THREAD
// being false and true, respectively. Also, it still need to shuffle the final
// output values to their actual corresponding threads. In the case of when
// X/Y/Z_THREAD = {false, true, false}, after the intra-block reduction, the
// final results will still be held by the first blockDim.y threads, which need
// to be transferred to threads at threadIdx.x == 0 and threadIdx.z == 0.
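//
// For example, with blockDim = {32, 4, 2} and X/Y/Z_THREAD = {true, false,
// false}, rblock_size is 32 and the remaining factor is 256 / 32 = 8: the block
// is viewed as a 32 x 8 arrangement, 8 partial values are combined per x-lane,
// and the 32 results are routed through shared memory back to the threads with
// threadIdx.y == 0 and threadIdx.z == 0.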
template <
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T,
typename Func>
__device__ void gridReduceLastBlock(
T& out,
const T* in,
const size_t in_size,
Func reduction_op,
T* shared_buf,
bool read_write_pred,
T init_val) {
const int tid = ioffset(threadIdx, blockDim);
const int block_size = isize(blockDim);
const int rblock_size =
size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim);
T inp = init_val;
if (tid < in_size) {
inp = in[tid];
}
for (size_t i = tid + block_size; i < in_size; i += block_size) {
reduction_op(inp, in[i]);
}
const auto should_write = (X_THREAD || threadIdx.x == 0) &&
(Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0);
auto rem_size = block_size / rblock_size;
if (rem_size > 1) {
const int rblock_offset = tid % rblock_size;
const int rblock_idx = tid / rblock_size;
blockReduce<false, true, false>(
inp,
inp,
reduction_op,
dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0},
dim3{(unsigned)rblock_size, (unsigned)rem_size},
shared_buf,
true,
init_val);
__syncthreads();
if (tid < rblock_size) {
shared_buf[tid] = inp;
}
__syncthreads();
if (should_write) {
inp = shared_buf[offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(
threadIdx, blockDim)];
}
}
if (should_write && read_write_pred) {
out = inp;
}
}
// Reduces per-thread values across thread blocks.
//
// Function parameters:
// - out: Per-thread output location
// - inp_val: Per-thread input value
// - reduction_op: Scalar reduction function
// - work_buf: Temporary buffer for cross-block reductions
// - sync_flags: A vector of integers for synchronizations
// - shared_buf: Shared memory buffer for intra-block reduction
//
// Return true when the thread block has the valid result.
//
// Template parameters:
// - X/Y/Z_BLOCK: When true, reduces across thread blocks along the X/Y/Z
// dimensions
// - X/Y/Z_THREAD: When true, all threads along the X/Y/Z dimensions participate
// in the cross-block reduction. Otherwise, only threads at offset 0 do.
// - T: Scalar data type of input/output data
// - Func: Type of scalar reduction function
//
// Template parameters X/Y/Z_BLOCK define a group of thread blocks that are
// reduced together. We call it a reduction segment. Some examples are:
//
// Case 1: X/Y/Z_BLOCK == true/true/true -> There is only one segment, which
// includes all thread blocks. It is effectively the same as the grid.
//
// Case 2: X/Y/Z_BLOCK == false/false/false -> Each thread block comprises an
// individual segment by itself.
//
// Case 3: X/Y/Z_BLOCK == true/false/false -> Each segment contains thread
// blocks that share the same blockIdx.y and blockIdx.z. There will be
// gridDim.y*gridDim.z such segments.
//
// X/Y/Z_THREAD defines a sub region of a thread block that should be reduced
// with the sub regions of other thread blocks. We call it a reduction block.
// E.g.,
//
// Case 1: X/Y/Z_THREAD == false/false/false -> Only thread 0 participates in
// the cross-block reductions. The reduction block is 1x1x1 with thread 0.
//
// Case 2: X/Y/Z_THREAD == true/true/true-> All threads in a thread block
// participate in the cross-block reductions. The reduction block in this case
// is equivalent to the thread block.
//
// After the function completes, only one thread block per reduction segment
// gets valid reduction results. There is no guarantee which particular block
// gets the final results.
//
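// A minimal, illustrative call sketch (not prescriptive; it assumes a float sum
// reduction, a work_buf sized to one float per participating reduction block
// and thread, and a zero-initialized sync_flags tensor):
//
//   __shared__ float shared_buf[256];
//   float block_result = 0.0f;
//   bool valid = reduction::gridReduce<true, true, true, true, true, true>(
//       block_result, thread_val,
//       [] __device__(float& a, float b) { a += b; },
//       work_buf, sync_flags, shared_buf,
//       /*read_write_pred=*/true, /*init_val=*/0.0f);
//   // Only the last block to finish in each segment returns true and holds
//   // the reduced value.
//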
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T,
typename Func>
__device__ bool gridReduce(
T& out,
T inp_val,
Func reduction_op,
volatile T* work_buf,
Tensor<int64_t, 1> sync_flags,
T* shared_buf,
bool read_write_pred,
T init_val) {
// Number of values to reduce in the grid dimensions
const auto seg_size =
size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim);
// Index of the reduction we're performing out of the seg_size
const auto seg_idx =
index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim);
 // Number of threads we can use in the final reduction; this seems to assume
 // all
// threads in the block participate
const auto rblock_size =
size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim);
// advance to the offset for this segment
// index of reduction * size of the reduction * size of threads
work_buf += seg_idx * seg_size * rblock_size;
if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) &&
(Z_THREAD || threadIdx.z == 0)) {
auto rblock_offset = offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(
blockIdx, gridDim);
auto thread_offset =
offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(
threadIdx, blockDim);
auto work_buf_offset = rblock_size * rblock_offset + thread_offset;
if (read_write_pred) {
work_buf[work_buf_offset] = inp_val;
} else {
work_buf[work_buf_offset] = init_val;
}
}
__syncthreads();
__shared__ bool last_block;
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
__threadfence();
// printf("%ld\n", sync_flags[seg_idx]);
auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1);
last_block = old + 1 == seg_size;
// printf("Last_block = %d + 1 == %d\n", (int)old, (int)seg_size);
}
__syncthreads();
if (last_block) {
// printf("Last block %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z);
// final reduction
gridReduceLastBlock<X_THREAD, Y_THREAD, Z_THREAD>(
out,
(T*)work_buf,
seg_size * rblock_size,
reduction_op,
shared_buf,
read_write_pred,
init_val);
return true;
} else {
// printf("Not last block %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z);
return false;
}
}
} // namespace reduction
| 186002d80d620904a4c712720028f11f61543cb7.cu | // Inter-block reduction.
//
// Function gridReduce performs point-wise reductions of scalars across thread
// blocks. Thread blocks are disjointly partitioned into groups of thread
// blocks, "reduction segments," that are collectively defined by boolean
// template parameters, X_BLOCK, Y_BLOCK and Z_BLOCK. Each of X/Y/Z_BLOCK
// determines whether thread blocks along the dimension should be grouped into
// the same reduction segment. Cross-block reductions are done independently
// within each segment and generate distinct results per segment. For
// instance, if all of X/Y/Z_BLOCK are true, reductions will be done across all
// thread blocks since there will be just a single segment consisting of all
// thread blocks. If none of them are true, each thread block will become a
// segment by itself, so no reduction will be performed.
//
// The input scalars to reduce within each segment are a certain subset of
// thread-private scalars provided as part of the gridReduce function
// parameters. Boolean template parameters, X_THREAD, Y_THREAD and Z_THREAD,
// determine which subset of the scalars should be used for inter-block
// reductions. Specifically, all the input scalars of threads along each
// dimension will be used when X/Y/Z_THREAD are true. Otherwise, only the value
// held at offset 0 of each dimension will be used. Thus, for example, if all of
// X/Y/Z_THREAD are true, the scalars of all threads in each block will
// participate in inter-block reductions. If all of them are false, only one
// scalar of the thread at threadIdx.x == threadIdx.y == threadIdx.z == 0 will
// be used. In the code below, we call the subset of threads a "reduction
// block."
//
// Inter-block reductions perform point-wise reductions of scalars of reduction
// blocks within each reduction segment. More specifically, let rb be a
// reduction block and rs be a reduction segment. Let IN(thread_idx, block_idx)
// denote the input scalar of thread at thread_idx and block_idx. The result of
// each reduction segment, OUT(thread_idx, block_idx_out), is defined only for
// each thread_idx in thread block block_idx_out in the segment as follows:
//
// OUT(thread_idx, block_idx_out) =
// Reduction of IN(thread_idx, block_idx) for
// all block_idx in a reduction segment
//
// OUT is not given for all threads that are not in block_idx_out and the
// reduction block.
//
// See also the function comment of gridReduce.
namespace reduction {
// Utility functions
template <typename _dim3>
__device__ __forceinline__ size_t size(const _dim3& d) {
return (size_t)d.x * (size_t)d.y * (size_t)d.z;
}
#define isize(d) d.x* d.y* d.z
template <typename _dim3pos, typename _dim3dim>
__device__ __forceinline__ size_t
offset(const _dim3pos& pos, const _dim3dim& dim) {
return (size_t)pos.x + (size_t)pos.y * (size_t)dim.x +
(size_t)pos.z * (size_t)dim.x * (size_t)dim.y;
}
#define ioffset(pos, dim) pos.x + pos.y* dim.x + pos.z* dim.x* dim.y
// Returns dim3 of each reduction segment.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) {
return dim3{
X_BLOCK ? grid_dim.x : 1,
Y_BLOCK ? grid_dim.y : 1,
Z_BLOCK ? grid_dim.z : 1};
}
// Returns the number of blocks in each reduction segment.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ size_t size_of_reduction_segment(const _dim3& grid_dim) {
return size(
dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim));
}
// Returns the total number of reduction segments.
template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3>
__device__ size_t number_of_reduction_segments(const _dim3& grid_dim) {
return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) *
(Z_BLOCK ? 1 : grid_dim.z);
}
// Returns the 1-D index of the segment of thread block of block_idx.
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
typename _dim3bi,
typename _dim3gd>
__device__ size_t
index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) {
size_t seg_idx = 0;
if (!Z_BLOCK)
seg_idx += block_idx.z;
if (!Y_BLOCK)
seg_idx = seg_idx * grid_dim.y + block_idx.y;
if (!X_BLOCK)
seg_idx = seg_idx * grid_dim.x + block_idx.x;
return seg_idx;
}
// Returns the offset of thread block in its reduction segment.
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
typename _dim3bi,
typename _dim3gd>
__device__ size_t
offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) {
size_t offset = 0;
if (Z_BLOCK)
offset = offset * grid_dim.z + block_idx.z;
if (Y_BLOCK)
offset = offset * grid_dim.y + block_idx.y;
if (X_BLOCK)
offset = offset * grid_dim.x + block_idx.x;
return offset;
}
// Returns dim3 of each reduction block.
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3>
__device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) {
return dim3{
X_THREAD ? block_dim.x : 1,
Y_THREAD ? block_dim.y : 1,
Z_THREAD ? block_dim.z : 1};
}
// Returns the number of threads of each reduction block.
template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3>
__device__ int size_of_reduction_block(const _dim3& block_dim) {
auto tmp_dim =
dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim);
return isize(tmp_dim);
}
// Returns the linear offset of a thread in a reduction block.
template <
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename _dim3ti,
typename _dim3bd>
__device__ int offset_in_reduction_block(
const _dim3ti& thread_idx,
const _dim3bd& block_dim) {
int offset = 0;
if (Z_THREAD)
offset += thread_idx.z;
if (Y_THREAD)
offset = offset * block_dim.y + thread_idx.y;
if (X_THREAD)
offset = offset * block_dim.x + thread_idx.x;
return offset;
}
// Reduces all the reduction blocks in each reduction segment.
//
// This is only used by one thread block per reduction segment. The input
// reduction blocks of the segment are stored in an intermediate buffer pointed
// by parameter in. Template parameters X/Y/Z_THREAD denote how the reduction
// block is formed.
//
// The size of a reduction block is by definition smaller or equal to the size
// of a thread block. We use the remaining threads to parallelize reductions
// across reduction blocks. For example, when X/Y/Z_THREAD = {true, false,
// false}, we use blockDim.y*blockDim.z threads for each output value. This is
// done first by loading the input values in parallel and then by reducing
// across threads of dimensions whose XYZ_THREAD are false.
//
// Note that what is done here after the loading from global memory is similar
// to what the existing blockReduce function does. The main difference is that
// the logical block to reduce is a 2D domain where the leading dimension is the
// size of a reduction block and the second dimension is the remaining factor in
// each thread block. For example, when X/Y/Z_THREAD = {false, true, false}, the
// threads are arranged as (blockDim.y, blockDim.x*blockDim.z). We do not reduce
// along the first dimension but only the second dimension. So, it is possible
// to reuse the existing blockReduce with dim3{blockDim.y,
// blockDim.x*blockDim.z} instead of blockDim and with X_THREAD and Y_THREAD
// being false and true, respectively. Also, it still needs to shuffle the final
// output values to their actual corresponding threads. In the case of when
// X/Y/Z_THREAD = {false, true, false}, after the intra-block reduction, the
// final results will still be held by the first blockDim.y threads, which need
// to be transferred to threads at threadIdx.x == 0 and threadIdx.z == 0.
template <
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T,
typename Func>
__device__ void gridReduceLastBlock(
T& out,
const T* in,
const size_t in_size,
Func reduction_op,
T* shared_buf,
bool read_write_pred,
T init_val) {
const int tid = ioffset(threadIdx, blockDim);
const int block_size = isize(blockDim);
const int rblock_size =
size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim);
T inp = init_val;
if (tid < in_size) {
inp = in[tid];
}
for (size_t i = tid + block_size; i < in_size; i += block_size) {
reduction_op(inp, in[i]);
}
const auto should_write = (X_THREAD || threadIdx.x == 0) &&
(Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0);
auto rem_size = block_size / rblock_size;
if (rem_size > 1) {
const int rblock_offset = tid % rblock_size;
const int rblock_idx = tid / rblock_size;
blockReduce<false, true, false>(
inp,
inp,
reduction_op,
dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0},
dim3{(unsigned)rblock_size, (unsigned)rem_size},
shared_buf,
true,
init_val);
__syncthreads();
if (tid < rblock_size) {
shared_buf[tid] = inp;
}
__syncthreads();
if (should_write) {
inp = shared_buf[offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(
threadIdx, blockDim)];
}
}
if (should_write && read_write_pred) {
out = inp;
}
}
// Reduces per-thread values across thread blocks.
//
// Function parameters:
// - out: Per-thread output location
// - inp_val: Per-thread input value
// - reduction_op: Scalar reduction function
// - work_buf: Temporary buffer for cross-block reductions
// - sync_flags: A vector of integers for synchronizations
// - shared_buf: Shared memory buffer for intra-block reduction
//
// Return true when the thread block has the valid result.
//
// Template parameters:
// - X/Y/Z_BLOCK: When true, reduces across thread blocks along the X/Y/Z
// dimensions
// - X/Y/Z_THREAD: When true, all threads along the X/Y/Z dimensions participate
// in the cross-block reduction. Otherwise, only threads at offset 0 do.
// - T: Scalar data type of input/output data
// - Func: Type of scalar reduction function
//
// Template parameters X/Y/Z_BLOCK define a group of thread blocks that are
// reduced together. We call it a reduction segment. Some examples are:
//
// Case 1: X/Y/Z_BLOCK == true/true/true -> There is only one segment, which
// includes all thread blocks. It is effectively the same as the grid.
//
// Case 2: X/Y/Z_BLOCK == false/false/false -> Each thread block comprises an
// individual segment by itself.
//
// Case 3: X/Y/Z_BLOCK == true/false/false -> Each segment contains thread
// blocks that share the same blockIdx.y and blockIdx.z. There will be
// gridDim.y*gridDim.z such segments.
//
// X/Y/Z_THREAD defines a sub region of a thread block that should be reduced
// with the sub regions of other thread blocks. We call it a reduction block.
// E.g.,
//
// Case 1: X/Y/Z_THREAD == false/false/false -> Only thread 0 participates in
// the cross-block reductions. The reduction block is 1x1x1 with thread 0.
//
// Case 2: X/Y/Z_THREAD == true/true/true-> All threads in a thread block
// participate in the cross-block reductions. The reduction block in this case
// is equivalent to the thread block.
//
// After the function completes, only one thread block per reduction segment
// gets valid reduction results. There is no guarantee which particular block
// gets the final results.
//
template <
bool X_BLOCK,
bool Y_BLOCK,
bool Z_BLOCK,
bool X_THREAD,
bool Y_THREAD,
bool Z_THREAD,
typename T,
typename Func>
__device__ bool gridReduce(
T& out,
T inp_val,
Func reduction_op,
volatile T* work_buf,
Tensor<int64_t, 1> sync_flags,
T* shared_buf,
bool read_write_pred,
T init_val) {
// Number of values to reduce in the grid dimensions
const auto seg_size =
size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim);
// Index of the reduction we're performing out of the seg_size
const auto seg_idx =
index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim);
 // Number of threads we can use in the final reduction; this seems to assume
 // all
// threads in the block participate
const auto rblock_size =
size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim);
// advance to the offset for this segment
// index of reduction * size of the reduction * size of threads
work_buf += seg_idx * seg_size * rblock_size;
if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) &&
(Z_THREAD || threadIdx.z == 0)) {
auto rblock_offset = offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(
blockIdx, gridDim);
auto thread_offset =
offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(
threadIdx, blockDim);
auto work_buf_offset = rblock_size * rblock_offset + thread_offset;
if (read_write_pred) {
work_buf[work_buf_offset] = inp_val;
} else {
work_buf[work_buf_offset] = init_val;
}
}
__syncthreads();
__shared__ bool last_block;
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
__threadfence();
// printf("%ld\n", sync_flags[seg_idx]);
auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1);
last_block = old + 1 == seg_size;
// printf("Last_block = %d + 1 == %d\n", (int)old, (int)seg_size);
}
__syncthreads();
if (last_block) {
// printf("Last block %d %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z);
// final reduction
gridReduceLastBlock<X_THREAD, Y_THREAD, Z_THREAD>(
out,
(T*)work_buf,
seg_size * rblock_size,
reduction_op,
shared_buf,
read_write_pred,
init_val);
return true;
} else {
// printf("Not last block %d %d %d\n", blockIdx.x, blockIdx.y, blockIdx.z);
return false;
}
}
} // namespace reduction
|
a9be3386d997fbb0e92e58d8c7948332b85ddc8c.hip | // !!! This is a file automatically generated by hipify!!!
#if 0
#include "solve.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#ifdef _WIN32
typedef unsigned int uint32_t;
//typedef unsigned short uint32_t;
#endif
using namespace std;
#define PROFILE 0
#define USE_GRID 1
#define USE_BOX_PRUNING 0
#define kRadius 0.1f
#define kMaxRadius (kRadius)
#define kInvCellEdge (0.5f/kMaxRadius)
#if USE_GRID
typedef uint32_t CellId;
#else
typedef float CellId;
#endif
struct GrainSystem
{
public:
Vec3* mPositions;
Vec3* mVelocities;
float* mRadii;
Vec3* mSortedPositions;
Vec3* mSortedVelocities;
float* mSortedRadii;
Vec3* mNewVelocities;
uint32_t* mCellStarts;
uint32_t* mCellEnds;
CellId* mCellIds;
uint32_t* mIndices;
uint32_t mNumGrains;
GrainParams mParams;
};
#if PROFILE
struct CudaTimer
{
CudaTimer(const char* name, hipEvent_t start, hipEvent_t stop, float& timer) : mTimer(timer), mName(name), mStart(start), mStop(stop)
{
hipEventRecord(mStart, 0);
}
~CudaTimer()
{
hipEventRecord(mStop, 0);
hipEventSynchronize(mStop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, mStart, mStop);
mTimer += elapsedTime;
//cout << mName << " took: " << elapsedTime << endl;
}
float& mTimer;
hipEvent_t mStart;
hipEvent_t mStop;
const char* mName;
};
#else
struct CudaTimer
{
CudaTimer(const char*, hipEvent_t, hipEvent_t, float& ) {}
};
#endif
void SortCellIndices(uint32_t* cellIds, uint32_t* particleIndices, uint32_t numGrains);
void SortCellIndices(float* cellIds, uint32_t* particleIndices, uint32_t numGrains);
__device__ inline float sqr(float x) { return x*x; }
// calculate collision impulse
__device__ inline Vec3 CollisionImpulse(Vec3 va, Vec3 vb, float ma, float mb, Vec3 n, float d, float baumgarte, float friction, float overlap)
{
// calculate relative velocity
Vec3 vd = vb-va;
// calculate relative normal velocity
float vn = Dot(vd, n);
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
//if (vn < 0.0f)
vn = min(vn, 0.0f);
{
// calculate relative tangential velocity
Vec3 vt = vd - n*vn;
float vtsq = Dot(vt, vt);
float rcpvt = rsqrtf(vtsq);// + 0.001f);
// position bias
float bias = baumgarte*min(d+overlap, 0.0f);
Vec3 jn = -(vn + bias)*n;
Vec3 jt = max(friction*vn*rcpvt, -1.0f)*vt;
// crappy static friction
if (fabsf(vtsq*rcpvt) < fabsf(friction*vn*2.0f) && vn < 0.0f)
jt = -vt;
// total mass
float msum = ma + mb;
// normal impulse
j = (jn + jt)*mb/msum;
}
return j;
}
#if USE_GRID
const uint32_t kGridDim = 128;
// transform a world space coordinate into cell coordinate
__device__ inline uint32_t GridCoord(float x, float invCellEdge)
{
// offset to handle negative numbers
float l = x+1000.0f;
uint32_t c = (uint32_t)(floorf(l*invCellEdge));
return c;
}
__device__ inline uint32_t GridHash(int x, int y, int z)
{
uint32_t cx = x & (kGridDim-1);
uint32_t cy = y & (kGridDim-1);
uint32_t cz = z & (kGridDim-1);
return cy*(kGridDim*kGridDim) + cx*kGridDim + cz;
}
/*
__device__ inline uint32_t GridHash(int x, int y, int z)
{
const uint32_t p1 = 73856093;
const uint32_t p2 = 19349663;
const uint32_t p3 = 53471161;
uint32_t n = x*p1 ^ y*p2 ^ z*p3;
return n&(kGridDim*kGridDim*kGridDim-1);
}
*/
__global__ void CreateCellIndices(const Vec3* positions, uint32_t* cellIds, uint32_t* particleIndices)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
Vec3 p = positions[i];
cellIds[i] = GridHash(GridCoord(p.x, kInvCellEdge), GridCoord(p.y, kInvCellEdge), GridCoord(p.z, kInvCellEdge));
particleIndices[i] = i;
}
__global__ void CreateGrid(const uint32_t* cellIds, uint32_t* cellStarts, uint32_t* cellEnds, uint32_t numGrains)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
// scan the particle-cell array to find the start and end
uint32_t c = cellIds[i];
if (i == 0)
{
cellStarts[c] = i;
}
else
{
uint32_t p = cellIds[i-1];
if (c != p)
{
cellStarts[c] = i;
cellEnds[p] = i;
}
}
if (i == numGrains-1)
{
cellEnds[c] = i+1;
}
}
__device__ inline Vec3 CollideSphere(Vec3 xa, Vec3 xb, Vec3 va, Vec3 vb, float ra, float rb, float baumgarte, float friction, float overlap)
{
// distance to sphere
Vec3 t = xa - xb;
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
float d = Dot(t, t);
float rsum = ra + rb;
float mtd = d - sqr(rsum);
if (mtd < 0.0f)
{
Vec3 n = Vec3(0.0f, 1.0f, 0.0f);
if (d > 0.0f)
{
float rcpDist = rsqrtf(d);
n = t * rcpDist;
d = d * rcpDist;
}
j = CollisionImpulse(vb, va, 1.0f, 1.0f, n, d-rsum, baumgarte, friction, overlap);
}
return j;
}
__device__ inline Vec3 CollideCell(int index, int cx, int cy, int cz, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
const Vec3* positions, const Vec3* velocities, const float* radii, Vec3 x, Vec3 v, float r, float baumgarte, float friction, float overlap)
{
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
uint32_t cellIndex = GridHash(cx, cy, cz);
uint32_t cellStart = cellStarts[cellIndex];
uint32_t cellEnd = cellEnds[cellIndex];
for (int i=cellStart; i < cellEnd; ++i)
{
uint32_t particleIndex = i;//indices[i];
if (particleIndex != index)
{
j += CollideSphere(x, positions[particleIndex], v, velocities[particleIndex], r, radii[particleIndex], baumgarte, friction, overlap);
}
}
return j;
}
#endif
__global__ void ReorderParticles(const Vec3* positions, const Vec3* velocities, const float* radii, Vec3* sortedPositions, Vec3* sortedVelocities, float* sortedRadii, const uint32_t* indices)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
int originalIndex = indices[i];
sortedPositions[i] = positions[originalIndex];
sortedVelocities[i] = velocities[originalIndex];
sortedRadii[i] = radii[originalIndex];
}
__global__ void Collide(const Vec3* positions, const Vec3* velocities, const float* radii, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
Vec3* newVelocities, int numGrains, GrainParams params, float dt, float scale)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const Vec3 x = positions[index];
const Vec3 v = velocities[index];
const float r = radii[index];
Vec3 vd = Vec3(0.0f, 0.0f, 0.0f);
#if USE_GRID
// collide particles
int cx = GridCoord(x.x, kInvCellEdge);
int cy = GridCoord(x.y, kInvCellEdge);
int cz = GridCoord(x.z, kInvCellEdge);
for (int k=cz-1; k <= cz+1; ++k)
{
for (int j=cy-1; j <= cy+1; ++j)
{
for (int i=cx-1; i <= cx+1; ++i)
{
vd += CollideCell(index, i, j, k, cellStarts, cellEnds, indices, positions, velocities, radii, x, v, r, params.mBaumgarte, params.mFriction, params.mOverlap);
}
}
}
#endif
// collide planes
for (int i=0; i < params.mNumPlanes; ++i)
{
Vec4 p = params.mPlanes[i];
// distance to plane
float d = x.x*p.x + x.y*p.y + x.z*p.z + p.w;
float mtd = d - r;
if (mtd < 0.0f)
{
vd += CollisionImpulse(Vec3(0.0f, 0.0f, 0.0f), v, 0.0f, 1.0f, Vec3(p.x, p.y, p.z), mtd, params.mBaumgarte, 0.8f, params.mOverlap);
}
}
// write back velocity
newVelocities[index] = v + vd * scale;
}
__global__ void IntegrateForce(Vec3* velocities, Vec3 gravity, float damp, float dt)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
velocities[index] += (gravity - damp*velocities[index])*dt;
}
__global__ void IntegrateVelocity(Vec3* positions, Vec3* velocities, const Vec3* newVelocities, float dt)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
// x += v*dt
velocities[index] = newVelocities[index];
positions[index] += velocities[index]*dt;
}
/*
__global__ void PrintCellCounts(uint32_t* cellStarts, uint32_t* cellEnds)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
printf("%d\n", cellEnds[index]-cellStarts[index]);
}
*/
//------------------------------------------------------------------
GrainSystem* grainCreateSystem(int numGrains)
{
GrainSystem* s = new GrainSystem();
s->mNumGrains = numGrains;
hipMalloc(&s->mPositions, numGrains*sizeof(Vec3));
hipMalloc(&s->mVelocities, numGrains*sizeof(Vec3));
hipMalloc(&s->mNewVelocities, numGrains*sizeof(Vec3));
hipMalloc(&s->mRadii, numGrains*sizeof(float));
hipMalloc(&s->mSortedPositions, numGrains*sizeof(Vec3));
hipMalloc(&s->mSortedVelocities, numGrains*sizeof(Vec3));
hipMalloc(&s->mSortedRadii, numGrains*sizeof(float));
// grid
#if USE_GRID
hipMalloc(&s->mCellStarts, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
hipMalloc(&s->mCellEnds, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
#endif
hipMalloc(&s->mCellIds, numGrains*sizeof(uint32_t));
hipMalloc(&s->mIndices, numGrains*sizeof(uint32_t));
return s;
}
void grainDestroySystem(GrainSystem* s)
{
hipFree(s->mPositions);
hipFree(s->mVelocities);
hipFree(s->mNewVelocities);
hipFree(s->mRadii);
hipFree(s->mSortedPositions);
hipFree(s->mSortedVelocities);
hipFree(s->mSortedRadii);
#if USE_GRID
hipFree(s->mCellStarts);
hipFree(s->mCellEnds);
#endif
hipFree(s->mCellIds);
hipFree(s->mIndices);
delete s;
}
void grainSetSprings(GrainSystem* s, const uint32_t* springIndices, const float* springLengths, uint32_t numSprings)
{
/*
s->mSpringIndices = (uint32_t*)malloc(numSprings*2*sizeof(uint32_t));
s->mSpringLengths = (float*)malloc(numSprings*sizeof(float));
memcpy(s->mSpringIndices, springIndices, numSprings*2*sizeof(uint32_t));
memcpy(s->mSpringLengths, springLengths, numSprings*sizeof(float));
s->mNumSprings = numSprings;
*/
}
void grainSetPositions(GrainSystem* s, float* p, int n)
{
hipMemcpy(&s->mPositions[0], p, sizeof(Vec3)*n, hipMemcpyHostToDevice);
}
void grainSetVelocities(GrainSystem* s, float* v, int n)
{
hipMemcpy(&s->mVelocities[0], v, sizeof(Vec3)*n, hipMemcpyHostToDevice);
}
void grainSetRadii(GrainSystem* s, float* r)
{
hipMemcpy(&s->mRadii[0], r, sizeof(float)*s->mNumGrains, hipMemcpyHostToDevice);
}
void grainGetPositions(GrainSystem* s, float* p)
{
hipMemcpy(p, &s->mPositions[0], sizeof(Vec3)*s->mNumGrains, hipMemcpyDeviceToHost);
}
void grainGetVelocities(GrainSystem* s, float* v)
{
hipMemcpy(v, &s->mVelocities[0], sizeof(Vec3)*s->mNumGrains, hipMemcpyDeviceToHost);
}
void grainGetRadii(GrainSystem* s, float* r)
{
hipMemcpy(r, &s->mRadii[0], sizeof(float)*s->mNumGrains, hipMemcpyDeviceToHost);
}
void grainSetParams(GrainSystem* s, GrainParams* params)
{
//hipMemcpy(s->mParams, params, sizeof(GrainParams), hipMemcpyHostToDevice);
s->mParams = *params;
}
void grainUpdateSystem(GrainSystem* s, float dt, int iterations, GrainTimers* timers)
{
//iterations = 10;
dt /= iterations;
const int kNumThreadsPerBlock = 128;
const int kNumBlocks = s->mNumGrains / kNumThreadsPerBlock;
GrainParams params = s->mParams;
params.mBaumgarte /= dt;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipFuncSetCacheConfig(CreateCellIndices, hipFuncCachePreferL1);
hipFuncSetCacheConfig(CreateGrid, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ReorderParticles, hipFuncCachePreferL1);
hipFuncSetCacheConfig(IntegrateForce, hipFuncCachePreferL1);
hipFuncSetCacheConfig(IntegrateVelocity, hipFuncCachePreferL1);
hipFuncSetCacheConfig(Collide, hipFuncCachePreferL1);
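 // Each substep rebuilds the spatial hash and advances the simulation:
 // hash particles into grid cells, sort by cell id, build per-cell start/end
 // ranges, reorder particle data into sorted order, apply gravity and damping,
 // resolve particle-particle and particle-plane collisions, then integrate
 // velocities into positions.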
for (int i=0; i < iterations; ++i)
{
{
CudaTimer timer("CreateCellIndices", start, stop, timers->mCreateCellIndices);
hipLaunchKernelGGL(( CreateCellIndices), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mPositions, s->mCellIds, s->mIndices);
}
{
CudaTimer timer("SortCellIndices", start, stop, timers->mSortCellIndices);
SortCellIndices(s->mCellIds, s->mIndices, s->mNumGrains);
}
#if USE_GRID
{
CudaTimer timer("CreateGrid", start, stop, timers->mCreateGrid);
hipMemset(s->mCellStarts, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);
hipMemset(s->mCellEnds, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);
hipLaunchKernelGGL(( CreateGrid), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mCellIds, s->mCellStarts, s->mCellEnds, s->mNumGrains);
}
#endif
{
CudaTimer timer("ReorderParticles", start, stop, timers->mReorder);
hipLaunchKernelGGL(( ReorderParticles), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mPositions, s->mVelocities, s->mRadii, s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mIndices);
}
//PrintCellCounts<<<kGridDim*kGridDim/kNumThreadsPerBlock, kNumThreadsPerBlock>>>(s->mCellStarts, s->mCellEnds);
{
float t;
CudaTimer timer("Integrate Force", start, stop, t);
hipLaunchKernelGGL(( IntegrateForce), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mSortedVelocities, s->mParams.mGravity, s->mParams.mDamp, dt);
}
{
CudaTimer timer("Collide", start, stop, timers->mCollide);
float scale = 1;//float(i+1)/(iterations);
hipLaunchKernelGGL(( Collide), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mCellStarts, s->mCellEnds, s->mIndices, s->mNewVelocities, s->mNumGrains, params, dt, scale);
}
{
CudaTimer timer("Integrate", start, stop, timers->mIntegrate);
hipLaunchKernelGGL(( IntegrateVelocity), dim3(kNumBlocks), dim3(kNumThreadsPerBlock), 0, 0, s->mSortedPositions, s->mSortedVelocities, s->mNewVelocities, dt);
}
swap(s->mSortedPositions, s->mPositions);
swap(s->mSortedVelocities, s->mVelocities);
swap(s->mSortedRadii, s->mRadii);
}
hipEventDestroy(start);
hipEventDestroy(stop);
}
#endif
| a9be3386d997fbb0e92e58d8c7948332b85ddc8c.cu | #if 0
#include "solve.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#ifdef _WIN32
typedef unsigned int uint32_t;
//typedef unsigned short uint32_t;
#endif
using namespace std;
#define PROFILE 0
#define USE_GRID 1
#define USE_BOX_PRUNING 0
#define kRadius 0.1f
#define kMaxRadius (kRadius)
#define kInvCellEdge (0.5f/kMaxRadius)
#if USE_GRID
typedef uint32_t CellId;
#else
typedef float CellId;
#endif
struct GrainSystem
{
public:
Vec3* mPositions;
Vec3* mVelocities;
float* mRadii;
Vec3* mSortedPositions;
Vec3* mSortedVelocities;
float* mSortedRadii;
Vec3* mNewVelocities;
uint32_t* mCellStarts;
uint32_t* mCellEnds;
CellId* mCellIds;
uint32_t* mIndices;
uint32_t mNumGrains;
GrainParams mParams;
};
#if PROFILE
struct CudaTimer
{
CudaTimer(const char* name, cudaEvent_t start, cudaEvent_t stop, float& timer) : mTimer(timer), mName(name), mStart(start), mStop(stop)
{
cudaEventRecord(mStart, 0);
}
~CudaTimer()
{
cudaEventRecord(mStop, 0);
cudaEventSynchronize(mStop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, mStart, mStop);
mTimer += elapsedTime;
//cout << mName << " took: " << elapsedTime << endl;
}
float& mTimer;
cudaEvent_t mStart;
cudaEvent_t mStop;
const char* mName;
};
#else
struct CudaTimer
{
CudaTimer(const char*, cudaEvent_t, cudaEvent_t, float& ) {}
};
#endif
void SortCellIndices(uint32_t* cellIds, uint32_t* particleIndices, uint32_t numGrains);
void SortCellIndices(float* cellIds, uint32_t* particleIndices, uint32_t numGrains);
__device__ inline float sqr(float x) { return x*x; }
// calculate collision impulse
__device__ inline Vec3 CollisionImpulse(Vec3 va, Vec3 vb, float ma, float mb, Vec3 n, float d, float baumgarte, float friction, float overlap)
{
// calculate relative velocity
Vec3 vd = vb-va;
// calculate relative normal velocity
float vn = Dot(vd, n);
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
//if (vn < 0.0f)
vn = min(vn, 0.0f);
{
// calculate relative tangential velocity
Vec3 vt = vd - n*vn;
float vtsq = Dot(vt, vt);
float rcpvt = rsqrtf(vtsq);// + 0.001f);
// position bias
float bias = baumgarte*min(d+overlap, 0.0f);
Vec3 jn = -(vn + bias)*n;
Vec3 jt = max(friction*vn*rcpvt, -1.0f)*vt;
// crappy static friction
if (fabsf(vtsq*rcpvt) < fabsf(friction*vn*2.0f) && vn < 0.0f)
jt = -vt;
// total mass
float msum = ma + mb;
// normal impulse
j = (jn + jt)*mb/msum;
}
return j;
}
#if USE_GRID
const uint32_t kGridDim = 128;
// transform a world space coordinate into cell coordinate
__device__ inline uint32_t GridCoord(float x, float invCellEdge)
{
// offset to handle negative numbers
float l = x+1000.0f;
uint32_t c = (uint32_t)(floorf(l*invCellEdge));
return c;
}
__device__ inline uint32_t GridHash(int x, int y, int z)
{
uint32_t cx = x & (kGridDim-1);
uint32_t cy = y & (kGridDim-1);
uint32_t cz = z & (kGridDim-1);
return cy*(kGridDim*kGridDim) + cx*kGridDim + cz;
}
/*
__device__ inline uint32_t GridHash(int x, int y, int z)
{
const uint32_t p1 = 73856093;
const uint32_t p2 = 19349663;
const uint32_t p3 = 53471161;
uint32_t n = x*p1 ^ y*p2 ^ z*p3;
return n&(kGridDim*kGridDim*kGridDim-1);
}
*/
__global__ void CreateCellIndices(const Vec3* positions, uint32_t* cellIds, uint32_t* particleIndices)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
Vec3 p = positions[i];
cellIds[i] = GridHash(GridCoord(p.x, kInvCellEdge), GridCoord(p.y, kInvCellEdge), GridCoord(p.z, kInvCellEdge));
particleIndices[i] = i;
}
__global__ void CreateGrid(const uint32_t* cellIds, uint32_t* cellStarts, uint32_t* cellEnds, uint32_t numGrains)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
// scan the particle-cell array to find the start and end
uint32_t c = cellIds[i];
if (i == 0)
{
cellStarts[c] = i;
}
else
{
uint32_t p = cellIds[i-1];
if (c != p)
{
cellStarts[c] = i;
cellEnds[p] = i;
}
}
if (i == numGrains-1)
{
cellEnds[c] = i+1;
}
}
__device__ inline Vec3 CollideSphere(Vec3 xa, Vec3 xb, Vec3 va, Vec3 vb, float ra, float rb, float baumgarte, float friction, float overlap)
{
// distance to sphere
Vec3 t = xa - xb;
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
float d = Dot(t, t);
float rsum = ra + rb;
float mtd = d - sqr(rsum);
if (mtd < 0.0f)
{
Vec3 n = Vec3(0.0f, 1.0f, 0.0f);
if (d > 0.0f)
{
float rcpDist = rsqrtf(d);
n = t * rcpDist;
d = d * rcpDist;
}
j = CollisionImpulse(vb, va, 1.0f, 1.0f, n, d-rsum, baumgarte, friction, overlap);
}
return j;
}
__device__ inline Vec3 CollideCell(int index, int cx, int cy, int cz, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
const Vec3* positions, const Vec3* velocities, const float* radii, Vec3 x, Vec3 v, float r, float baumgarte, float friction, float overlap)
{
Vec3 j = Vec3(0.0f, 0.0f, 0.0f);
uint32_t cellIndex = GridHash(cx, cy, cz);
uint32_t cellStart = cellStarts[cellIndex];
uint32_t cellEnd = cellEnds[cellIndex];
for (int i=cellStart; i < cellEnd; ++i)
{
uint32_t particleIndex = i;//indices[i];
if (particleIndex != index)
{
j += CollideSphere(x, positions[particleIndex], v, velocities[particleIndex], r, radii[particleIndex], baumgarte, friction, overlap);
}
}
return j;
}
#endif
__global__ void ReorderParticles(const Vec3* positions, const Vec3* velocities, const float* radii, Vec3* sortedPositions, Vec3* sortedVelocities, float* sortedRadii, const uint32_t* indices)
{
uint32_t i = blockIdx.x*blockDim.x + threadIdx.x;
int originalIndex = indices[i];
sortedPositions[i] = positions[originalIndex];
sortedVelocities[i] = velocities[originalIndex];
sortedRadii[i] = radii[originalIndex];
}
__global__ void Collide(const Vec3* positions, const Vec3* velocities, const float* radii, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices,
Vec3* newVelocities, int numGrains, GrainParams params, float dt, float scale)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const Vec3 x = positions[index];
const Vec3 v = velocities[index];
const float r = radii[index];
Vec3 vd = Vec3(0.0f, 0.0f, 0.0f);
#if USE_GRID
// collide particles
int cx = GridCoord(x.x, kInvCellEdge);
int cy = GridCoord(x.y, kInvCellEdge);
int cz = GridCoord(x.z, kInvCellEdge);
for (int k=cz-1; k <= cz+1; ++k)
{
for (int j=cy-1; j <= cy+1; ++j)
{
for (int i=cx-1; i <= cx+1; ++i)
{
vd += CollideCell(index, i, j, k, cellStarts, cellEnds, indices, positions, velocities, radii, x, v, r, params.mBaumgarte, params.mFriction, params.mOverlap);
}
}
}
#endif
// collide planes
for (int i=0; i < params.mNumPlanes; ++i)
{
Vec4 p = params.mPlanes[i];
// distance to plane
float d = x.x*p.x + x.y*p.y + x.z*p.z + p.w;
float mtd = d - r;
if (mtd < 0.0f)
{
vd += CollisionImpulse(Vec3(0.0f, 0.0f, 0.0f), v, 0.0f, 1.0f, Vec3(p.x, p.y, p.z), mtd, params.mBaumgarte, 0.8f, params.mOverlap);
}
}
// write back velocity
newVelocities[index] = v + vd * scale;
}
__global__ void IntegrateForce(Vec3* velocities, Vec3 gravity, float damp, float dt)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
velocities[index] += (gravity - damp*velocities[index])*dt;
}
__global__ void IntegrateVelocity(Vec3* positions, Vec3* velocities, const Vec3* newVelocities, float dt)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
// x += v*dt
velocities[index] = newVelocities[index];
positions[index] += velocities[index]*dt;
}
/*
__global__ void PrintCellCounts(uint32_t* cellStarts, uint32_t* cellEnds)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
printf("%d\n", cellEnds[index]-cellStarts[index]);
}
*/
//------------------------------------------------------------------
GrainSystem* grainCreateSystem(int numGrains)
{
GrainSystem* s = new GrainSystem();
s->mNumGrains = numGrains;
cudaMalloc(&s->mPositions, numGrains*sizeof(Vec3));
cudaMalloc(&s->mVelocities, numGrains*sizeof(Vec3));
cudaMalloc(&s->mNewVelocities, numGrains*sizeof(Vec3));
cudaMalloc(&s->mRadii, numGrains*sizeof(float));
cudaMalloc(&s->mSortedPositions, numGrains*sizeof(Vec3));
cudaMalloc(&s->mSortedVelocities, numGrains*sizeof(Vec3));
cudaMalloc(&s->mSortedRadii, numGrains*sizeof(float));
// grid
#if USE_GRID
cudaMalloc(&s->mCellStarts, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
cudaMalloc(&s->mCellEnds, kGridDim*kGridDim*kGridDim*sizeof(uint32_t));
#endif
cudaMalloc(&s->mCellIds, numGrains*sizeof(uint32_t));
cudaMalloc(&s->mIndices, numGrains*sizeof(uint32_t));
return s;
}
void grainDestroySystem(GrainSystem* s)
{
cudaFree(s->mPositions);
cudaFree(s->mVelocities);
cudaFree(s->mNewVelocities);
cudaFree(s->mRadii);
cudaFree(s->mSortedPositions);
cudaFree(s->mSortedVelocities);
cudaFree(s->mSortedRadii);
#if USE_GRID
cudaFree(s->mCellStarts);
cudaFree(s->mCellEnds);
#endif
cudaFree(s->mCellIds);
cudaFree(s->mIndices);
delete s;
}
void grainSetSprings(GrainSystem* s, const uint32_t* springIndices, const float* springLengths, uint32_t numSprings)
{
/*
s->mSpringIndices = (uint32_t*)malloc(numSprings*2*sizeof(uint32_t));
s->mSpringLengths = (float*)malloc(numSprings*sizeof(float));
memcpy(s->mSpringIndices, springIndices, numSprings*2*sizeof(uint32_t));
memcpy(s->mSpringLengths, springLengths, numSprings*sizeof(float));
s->mNumSprings = numSprings;
*/
}
void grainSetPositions(GrainSystem* s, float* p, int n)
{
cudaMemcpy(&s->mPositions[0], p, sizeof(Vec3)*n, cudaMemcpyHostToDevice);
}
void grainSetVelocities(GrainSystem* s, float* v, int n)
{
cudaMemcpy(&s->mVelocities[0], v, sizeof(Vec3)*n, cudaMemcpyHostToDevice);
}
void grainSetRadii(GrainSystem* s, float* r)
{
cudaMemcpy(&s->mRadii[0], r, sizeof(float)*s->mNumGrains, cudaMemcpyHostToDevice);
}
void grainGetPositions(GrainSystem* s, float* p)
{
cudaMemcpy(p, &s->mPositions[0], sizeof(Vec3)*s->mNumGrains, cudaMemcpyDeviceToHost);
}
void grainGetVelocities(GrainSystem* s, float* v)
{
cudaMemcpy(v, &s->mVelocities[0], sizeof(Vec3)*s->mNumGrains, cudaMemcpyDeviceToHost);
}
void grainGetRadii(GrainSystem* s, float* r)
{
cudaMemcpy(r, &s->mRadii[0], sizeof(float)*s->mNumGrains, cudaMemcpyDeviceToHost);
}
void grainSetParams(GrainSystem* s, GrainParams* params)
{
//cudaMemcpy(s->mParams, params, sizeof(GrainParams), cudaMemcpyHostToDevice);
s->mParams = *params;
}
void grainUpdateSystem(GrainSystem* s, float dt, int iterations, GrainTimers* timers)
{
//iterations = 10;
dt /= iterations;
const int kNumThreadsPerBlock = 128;
const int kNumBlocks = s->mNumGrains / kNumThreadsPerBlock;
GrainParams params = s->mParams;
params.mBaumgarte /= dt;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaFuncSetCacheConfig(CreateCellIndices, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(CreateGrid, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ReorderParticles, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(IntegrateForce, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(IntegrateVelocity, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(Collide, cudaFuncCachePreferL1);
for (int i=0; i < iterations; ++i)
{
{
CudaTimer timer("CreateCellIndices", start, stop, timers->mCreateCellIndices);
CreateCellIndices<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mCellIds, s->mIndices);
}
{
CudaTimer timer("SortCellIndices", start, stop, timers->mSortCellIndices);
SortCellIndices(s->mCellIds, s->mIndices, s->mNumGrains);
}
#if USE_GRID
{
CudaTimer timer("CreateGrid", start, stop, timers->mCreateGrid);
cudaMemset(s->mCellStarts, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);
cudaMemset(s->mCellEnds, 0, sizeof(uint32_t)*kGridDim*kGridDim*kGridDim);
CreateGrid<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mCellIds, s->mCellStarts, s->mCellEnds, s->mNumGrains);
}
#endif
{
CudaTimer timer("ReorderParticles", start, stop, timers->mReorder);
ReorderParticles<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mVelocities, s->mRadii, s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mIndices);
}
//PrintCellCounts<<<kGridDim*kGridDim/kNumThreadsPerBlock, kNumThreadsPerBlock>>>(s->mCellStarts, s->mCellEnds);
{
float t;
CudaTimer timer("Integrate Force", start, stop, t);
IntegrateForce<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedVelocities, s->mParams.mGravity, s->mParams.mDamp, dt);
}
{
CudaTimer timer("Collide", start, stop, timers->mCollide);
float scale = 1;//float(i+1)/(iterations);
Collide<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mCellStarts, s->mCellEnds, s->mIndices, s->mNewVelocities, s->mNumGrains, params, dt, scale);
}
{
CudaTimer timer("Integrate", start, stop, timers->mIntegrate);
IntegrateVelocity<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mNewVelocities, dt);
}
swap(s->mSortedPositions, s->mPositions);
swap(s->mSortedVelocities, s->mVelocities);
swap(s->mSortedRadii, s->mRadii);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
#endif
|
461cefcecf9c0f24579833542f9a0b11fafe3631.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#define NUM_THREADS 64 // Number of threads per work group.
__device__
float4 firstEigenVector( float* matrix )
{
// 8 iterations seems to be more than enough.
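 // Power iteration on the symmetric 3x3 covariance stored as
 // [c00, c01, c02, c11, c12, c22]: repeatedly multiply v by the matrix and
 // rescale by its largest component so v converges to the dominant
 // eigenvector, i.e. the best-fit axis of the color set.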
float4 v = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
#pragma unroll
for(int i = 0; i < 8; i++) {
float x = v.x * matrix[0] + v.y * matrix[1] + v.z * matrix[2];
float y = v.x * matrix[1] + v.y * matrix[3] + v.z * matrix[4];
float z = v.x * matrix[2] + v.y * matrix[4] + v.z * matrix[5];
float m = fmaxf(fmaxf(x, y), z);
float iv = 1.0f / m;
v.x = x * iv;
v.y = y * iv;
v.z = z * iv;
}
return v;
}
__device__
void colorSums( const float4 * colors, float4 * sums)
{
const int idx = threadIdx.x;
sums[idx] = colors[idx];
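 // XOR butterfly reduction over the 16 lanes: after the four pairwise adds
 // below, every sums[idx] holds the sum of all 16 colors (no barrier needed,
 // as the 16 threads belong to a single warp).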
sums[idx] += sums[idx^8];
sums[idx] += sums[idx^4];
sums[idx] += sums[idx^2];
sums[idx] += sums[idx^1];
}
__device__
float4 bestFitLine( const float4 * colors, float4 color_sum, float* covariance)
{
// Compute covariance matrix of the given colors.
const int idx = threadIdx.x;
float4 diff = colors[idx] - color_sum * make_float4(0.0625f, 0.0625f, 0.0625f, 0.0625f); // * 1.0f / 16.0f
covariance[6 * idx + 0] = diff.x * diff.x; // 0, 6, 12, 2, 8, 14, 4, 10, 0
covariance[6 * idx + 1] = diff.x * diff.y;
covariance[6 * idx + 2] = diff.x * diff.z;
covariance[6 * idx + 3] = diff.y * diff.y;
covariance[6 * idx + 4] = diff.y * diff.z;
covariance[6 * idx + 5] = diff.z * diff.z;
#pragma unroll
for(int d = 8; d > 0; d >>= 1)
{
if (idx < d)
{
covariance[6 * idx + 0] += covariance[6 * (idx+d) + 0];
covariance[6 * idx + 1] += covariance[6 * (idx+d) + 1];
covariance[6 * idx + 2] += covariance[6 * (idx+d) + 2];
covariance[6 * idx + 3] += covariance[6 * (idx+d) + 3];
covariance[6 * idx + 4] += covariance[6 * (idx+d) + 4];
covariance[6 * idx + 5] += covariance[6 * (idx+d) + 5];
}
}
// Compute first eigen vector.
return firstEigenVector(covariance);
}
// ////////////////////////////////////////////////////////////////////////////////
// // Sort colors
// ////////////////////////////////////////////////////////////////////////////////
__device__
void sortColors( const float * values, int * ranks)
{
const int tid = threadIdx.x;
int rank = 0;
#pragma unroll
for (int i = 0; i < 16; i++)
{
rank += (values[i] < values[tid]);
}
ranks[tid] = rank;
// Resolve elements with the same index.
#pragma unroll
for (int i = 0; i < 15; i++)
{
if (tid > i && ranks[tid] == ranks[i]) ++ranks[tid];
}
}
////////////////////////////////////////////////////////////////////////////////
// Load color block to shared mem
////////////////////////////////////////////////////////////////////////////////
__device__
void loadColorBlock( const uint * image, float4 * colors, float4 * sums, int * xrefs, float* temp, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
const int idx = threadIdx.x;
float4 tmp;
if (idx < 16)
{
// Read color and copy to shared mem.
uint c = image[(bid) * 16 + idx];
colors[idx].x = ((c >> 0) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].y = ((c >> 8) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].z = ((c >> 16) & 0xFF) * 0.003921568627f; //* (1.0f / 255.0f);
// No need to synchronize, 16 < warp size.
// Sort colors along the best fit line.
colorSums(colors, sums);
float4 axis = bestFitLine(colors, sums[idx], temp);
temp[idx] = colors[idx].x * axis.x + colors[idx].y * axis.y + colors[idx].z * axis.z;
sortColors(temp, xrefs);
tmp = colors[idx];
colors[xrefs[idx]] = tmp;
}
}
// ////////////////////////////////////////////////////////////////////////////////
// // Round color to RGB565 and expand
// ////////////////////////////////////////////////////////////////////////////////
__device__
float4 roundAndExpand(float4 v, ushort * w)
{
ushort x = rint(__saturatef(v.x) * 31.0f);
ushort y = rint(__saturatef(v.y) * 63.0f);
ushort z = rint(__saturatef(v.z) * 31.0f);
*w = ((x << 11) | (y << 5) | z);
v.x = x * 0.03227752766457f; // approximate integer bit expansion.
v.y = y * 0.01583151765563f;
v.z = z * 0.03227752766457f;
return v;
}
////////////////////////////////////////////////////////////////////////////////
// Evaluate permutations
////////////////////////////////////////////////////////////////////////////////
__device__
float evalPermutation( const float4* colors, uint permutation, ushort* start, ushort* end, float4 color_sum,
float* alphaTable4, int* prods4, float weight)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable4[bits & 3] * colors[i];
akku += prods4[bits & 3];
}
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = weight * color_sum - alphax_sum;
//// Compute endpoints using least squares.
// alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them.
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (1.0f/weight) * (e.x + e.y + e.z);
}
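// Derivation note (added for clarity): with per-pixel palette weights alpha_i and
// beta_i chosen so that alpha_i + beta_i == weight, minimising
// sum_i ||alpha_i*a + beta_i*b - x_i||^2 over the endpoints a and b leads to the
// 2x2 normal equations
//
//     [ sum alpha^2      sum alpha*beta ] [a]   [ sum alpha_i*x_i ]
//     [ sum alpha*beta   sum beta^2     ] [b] = [ sum beta_i*x_i  ]
//
// whose closed-form solution is exactly what `factor`, `a` and `b` compute above
// (betax_sum follows from alphax_sum because beta_i = weight - alpha_i). The three
// scalar sums are packed into the single integer `akku` (bits 16 and up, bits 8-15,
// and bits 0-7 respectively) so one integer add accumulates all of them per pixel.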
__device__
float evalPermutation3(const float4 * colors, uint permutation, ushort * start, ushort * end, float4 color_sum,
float* alphaTable3, int* prods3)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable3[bits & 3] * colors[i];
akku += prods3[bits & 3];
}
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = 4.0f * color_sum - alphax_sum;
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (0.25f) * (e.x + e.y + e.z);
}
__device__
uint4 evalAllPermutations(const float4 * colors, const unsigned int * permutations,
float *errors, float4 color_sum, uint * s_permutations,
float* alphaTable4, int* prods4,
float* alphaTable3, int* prods3)
{
const int idx = threadIdx.x;
uint bestStart;
uint bestEnd;
uint bestPermutation;
uint temp;
float bestError = FLT_MAX;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 992) break;
ushort start, end;
uint permutation = permutations[pidx];
if (pidx < 160) s_permutations[pidx] = permutation;
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable4, prods4, 9.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
}
}
if (bestStart < bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= 0x55555555; // Flip indices.
}
#pragma unroll
for(int i = 0; i < 3; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 160) break;
ushort start, end;
uint permutation = s_permutations[pidx];
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable3, prods3, 4.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
if (bestStart > bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
}
}
}
errors[idx] = bestError;
uint4 result = make_uint4(bestStart, bestEnd, bestPermutation, 0);
return result;
}
////////////////////////////////////////////////////////////////////////////////
// Find index with minimum error
////////////////////////////////////////////////////////////////////////////////
__device__
int findMinError( float * errors, int * indices)
{
const int idx = threadIdx.x;
indices[idx] = idx;
#pragma unroll
for(int d = NUM_THREADS/2; d > 32; d >>= 1)
{
__syncthreads();
if (idx < d)
{
float err0 = errors[idx];
float err1 = errors[idx + d];
if (err1 < err0) {
errors[idx] = err1;
indices[idx] = indices[idx + d];
}
}
}
__syncthreads();
// unroll last 6 iterations
if (idx < 32)
{
if (errors[idx + 32] < errors[idx]) {
errors[idx] = errors[idx + 32];
indices[idx] = indices[idx + 32];
}
if (errors[idx + 16] < errors[idx]) {
errors[idx] = errors[idx + 16];
indices[idx] = indices[idx + 16];
}
if (errors[idx + 8] < errors[idx]) {
errors[idx] = errors[idx + 8];
indices[idx] = indices[idx + 8];
}
if (errors[idx + 4] < errors[idx]) {
errors[idx] = errors[idx + 4];
indices[idx] = indices[idx + 4];
}
if (errors[idx + 2] < errors[idx]) {
errors[idx] = errors[idx + 2];
indices[idx] = indices[idx + 2];
}
if (errors[idx + 1] < errors[idx]) {
errors[idx] = errors[idx + 1];
indices[idx] = indices[idx + 1];
}
}
__syncthreads();
return indices[0];
}
//Save DXT block
__device__
void saveBlockDXT1(uint start, uint end, uint permutation, int* xrefs, uint2 * result, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
if (start == end)
{
permutation = 0;
}
// Reorder permutation.
uint indices = 0;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int ref = xrefs[i];
indices |= ((permutation >> (2 * ref)) & 3) << (2 * i);
}
// Write endpoints.
result[bid].x = (end << 16) | start;
// Write palette indices.
result[bid].y = indices;
}
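// Block layout note (added for clarity): the pair written above is a standard
// DXT1/BC1 block, i.e. result[bid].x packs the two RGB565 endpoints (end color in
// the high 16 bits, start color in the low 16 bits) and result[bid].y packs
// sixteen 2-bit palette indices, one per pixel, remapped back to the original
// pixel order through xrefs.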
| 461cefcecf9c0f24579833542f9a0b11fafe3631.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#define NUM_THREADS 64 // Number of threads per work group.
__device__
float4 firstEigenVector( float* matrix )
{
// 8 iterations seems to be more than enough.
float4 v = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
#pragma unroll
for(int i = 0; i < 8; i++) {
float x = v.x * matrix[0] + v.y * matrix[1] + v.z * matrix[2];
float y = v.x * matrix[1] + v.y * matrix[3] + v.z * matrix[4];
float z = v.x * matrix[2] + v.y * matrix[4] + v.z * matrix[5];
float m = fmaxf(fmaxf(x, y), z);
float iv = 1.0f / m;
v.x = x * iv;
v.y = y * iv;
v.z = z * iv;
}
return v;
}
__device__
void colorSums( const float4 * colors, float4 * sums)
{
const int idx = threadIdx.x;
sums[idx] = colors[idx];
sums[idx] += sums[idx^8];
sums[idx] += sums[idx^4];
sums[idx] += sums[idx^2];
sums[idx] += sums[idx^1];
}
__device__
float4 bestFitLine( const float4 * colors, float4 color_sum, float* covariance)
{
// Compute covariance matrix of the given colors.
const int idx = threadIdx.x;
float4 diff = colors[idx] - color_sum * make_float4(0.0625f, 0.0625f, 0.0625f, 0.0625f); // * 1.0f / 16.0f
covariance[6 * idx + 0] = diff.x * diff.x; // 0, 6, 12, 2, 8, 14, 4, 10, 0
covariance[6 * idx + 1] = diff.x * diff.y;
covariance[6 * idx + 2] = diff.x * diff.z;
covariance[6 * idx + 3] = diff.y * diff.y;
covariance[6 * idx + 4] = diff.y * diff.z;
covariance[6 * idx + 5] = diff.z * diff.z;
#pragma unroll
for(int d = 8; d > 0; d >>= 1)
{
if (idx < d)
{
covariance[6 * idx + 0] += covariance[6 * (idx+d) + 0];
covariance[6 * idx + 1] += covariance[6 * (idx+d) + 1];
covariance[6 * idx + 2] += covariance[6 * (idx+d) + 2];
covariance[6 * idx + 3] += covariance[6 * (idx+d) + 3];
covariance[6 * idx + 4] += covariance[6 * (idx+d) + 4];
covariance[6 * idx + 5] += covariance[6 * (idx+d) + 5];
}
}
// Compute first eigen vector.
return firstEigenVector(covariance);
}
// ////////////////////////////////////////////////////////////////////////////////
// // Sort colors
// ////////////////////////////////////////////////////////////////////////////////
__device__
void sortColors( const float * values, int * ranks)
{
const int tid = threadIdx.x;
int rank = 0;
#pragma unroll
for (int i = 0; i < 16; i++)
{
rank += (values[i] < values[tid]);
}
ranks[tid] = rank;
// Resolve elements with the same index.
#pragma unroll
for (int i = 0; i < 15; i++)
{
if (tid > i && ranks[tid] == ranks[i]) ++ranks[tid];
}
}
////////////////////////////////////////////////////////////////////////////////
// Load color block to shared mem
////////////////////////////////////////////////////////////////////////////////
__device__
void loadColorBlock( const uint * image, float4 * colors, float4 * sums, int * xrefs, float* temp, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
const int idx = threadIdx.x;
float4 tmp;
if (idx < 16)
{
// Read color and copy to shared mem.
uint c = image[(bid) * 16 + idx];
colors[idx].x = ((c >> 0) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].y = ((c >> 8) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].z = ((c >> 16) & 0xFF) * 0.003921568627f; //* (1.0f / 255.0f);
// No need to synchronize, 16 < warp size.
// Sort colors along the best fit line.
colorSums(colors, sums);
float4 axis = bestFitLine(colors, sums[idx], temp);
temp[idx] = colors[idx].x * axis.x + colors[idx].y * axis.y + colors[idx].z * axis.z;
sortColors(temp, xrefs);
tmp = colors[idx];
colors[xrefs[idx]] = tmp;
}
}
// ////////////////////////////////////////////////////////////////////////////////
// // Round color to RGB565 and expand
// ////////////////////////////////////////////////////////////////////////////////
__device__
float4 roundAndExpand(float4 v, ushort * w)
{
ushort x = rint(__saturatef(v.x) * 31.0f);
ushort y = rint(__saturatef(v.y) * 63.0f);
ushort z = rint(__saturatef(v.z) * 31.0f);
*w = ((x << 11) | (y << 5) | z);
v.x = x * 0.03227752766457f; // approximate integer bit expansion.
v.y = y * 0.01583151765563f;
v.z = z * 0.03227752766457f;
return v;
}
////////////////////////////////////////////////////////////////////////////////
// Evaluate permutations
////////////////////////////////////////////////////////////////////////////////
__device__
float evalPermutation( const float4* colors, uint permutation, ushort* start, ushort* end, float4 color_sum,
float* alphaTable4, int* prods4, float weight)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable4[bits & 3] * colors[i];
akku += prods4[bits & 3];
}
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = weight * color_sum - alphax_sum;
//// Compute endpoints using least squares.
// alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them.
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (1.0f/weight) * (e.x + e.y + e.z);
}
__device__
float evalPermutation3(const float4 * colors, uint permutation, ushort * start, ushort * end, float4 color_sum,
float* alphaTable3, int* prods3)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable3[bits & 3] * colors[i];
akku += prods3[bits & 3];
}
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = 4.0f * color_sum - alphax_sum;
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (0.25f) * (e.x + e.y + e.z);
}
__device__
uint4 evalAllPermutations(const float4 * colors, const unsigned int * permutations,
float *errors, float4 color_sum, uint * s_permutations,
float* alphaTable4, int* prods4,
float* alphaTable3, int* prods3)
{
const int idx = threadIdx.x;
uint bestStart;
uint bestEnd;
uint bestPermutation;
uint temp;
float bestError = FLT_MAX;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 992) break;
ushort start, end;
uint permutation = permutations[pidx];
if (pidx < 160) s_permutations[pidx] = permutation;
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable4, prods4, 9.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
}
}
if (bestStart < bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= 0x55555555; // Flip indices.
}
#pragma unroll
for(int i = 0; i < 3; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 160) break;
ushort start, end;
uint permutation = s_permutations[pidx];
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable3, prods3, 4.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
if (bestStart > bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
}
}
}
errors[idx] = bestError;
uint4 result = make_uint4(bestStart, bestEnd, bestPermutation, 0);
return result;
}
////////////////////////////////////////////////////////////////////////////////
// Find index with minimum error
////////////////////////////////////////////////////////////////////////////////
__device__
int findMinError( float * errors, int * indices)
{
const int idx = threadIdx.x;
indices[idx] = idx;
#pragma unroll
for(int d = NUM_THREADS/2; d > 32; d >>= 1)
{
__syncthreads();
if (idx < d)
{
float err0 = errors[idx];
float err1 = errors[idx + d];
if (err1 < err0) {
errors[idx] = err1;
indices[idx] = indices[idx + d];
}
}
}
__syncthreads();
// unroll last 6 iterations
if (idx < 32)
{
if (errors[idx + 32] < errors[idx]) {
errors[idx] = errors[idx + 32];
indices[idx] = indices[idx + 32];
}
if (errors[idx + 16] < errors[idx]) {
errors[idx] = errors[idx + 16];
indices[idx] = indices[idx + 16];
}
if (errors[idx + 8] < errors[idx]) {
errors[idx] = errors[idx + 8];
indices[idx] = indices[idx + 8];
}
if (errors[idx + 4] < errors[idx]) {
errors[idx] = errors[idx + 4];
indices[idx] = indices[idx + 4];
}
if (errors[idx + 2] < errors[idx]) {
errors[idx] = errors[idx + 2];
indices[idx] = indices[idx + 2];
}
if (errors[idx + 1] < errors[idx]) {
errors[idx] = errors[idx + 1];
indices[idx] = indices[idx + 1];
}
}
__syncthreads();
return indices[0];
}
//Save DXT block
__device__
void saveBlockDXT1(uint start, uint end, uint permutation, int* xrefs, uint2 * result, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
if (start == end)
{
permutation = 0;
}
// Reorder permutation.
uint indices = 0;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int ref = xrefs[i];
indices |= ((permutation >> (2 * ref)) & 3) << (2 * i);
}
// Write endpoints.
result[bid].x = (end << 16) | start;
// Write palette indices.
result[bid].y = indices;
}
|
66d9126422955eb0211d18f87a9e740f3b19c405.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include "math.h"
#include "stdio.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
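// Overview (descriptive comments added for clarity): one thread produces one
// output pixel, addressed through a flat 1-D index. The filter weights are first
// staged into dynamically sized shared memory by the first filterWidth*filterWidth
// threads, and after the barrier every thread walks the filter window around its
// pixel, clamping out-of-range neighbours to the image border, and writes the
// accumulated weighted sum to outputChannel.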
int numOfCols=numRows;
int numOfRows=numCols;
int pixels_number=numOfCols*numOfRows;
int block_size=blockDim.x;
int current_index = block_size* blockIdx.x + threadIdx.x;
int filter_size=filterWidth*filterWidth;
extern __shared__ float sh_filter[];
int num_filter_elements_per_thread=filter_size/block_size;
if(filter_size%block_size>0)
num_filter_elements_per_thread++;
if(threadIdx.x<filter_size){
for(int i=0;i<num_filter_elements_per_thread;i++){
int idx=threadIdx.x*num_filter_elements_per_thread+i;
if(idx>=filter_size)
continue;
sh_filter[idx]=filter[idx];
}
}
__syncthreads();
if(current_index >= pixels_number)
return;
int img_x=current_index/numOfRows;
int img_y=(current_index%numOfRows);
float sum=0.f;
int fcx=filterWidth/2;
int fcy=fcx;
for(int i=0;i<filter_size;i++){
int fx=i/filterWidth;
int fy=(i%filterWidth);
int Imx=fx-fcx+img_x;
int Imy=fy-fcy+img_y;
if(Imx>=numOfCols)
Imx=numOfCols-1;
if(Imy >=numOfRows)
Imy=numOfRows-1;
if(Imx<0)
Imx=0;
if(Imy<0)
Imy=0;
sum+= sh_filter[i]* inputChannel[Imx*numOfRows+Imy];
}
outputChannel[current_index]= sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int pixels_number=numRows*numCols;
int block_size=blockDim.x;
int current_index= block_size* blockIdx.x + threadIdx.x;
if(current_index>=pixels_number)
return;
redChannel[current_index]=inputImageRGBA[current_index].x;
greenChannel[current_index]=inputImageRGBA[current_index].y;
blueChannel[current_index]=inputImageRGBA[current_index].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
int pixels_number=numRows*numCols;
int block_size=blockDim.x;
int current_index= block_size* blockIdx.x + threadIdx.x;
if(current_index>=pixels_number)
return;
unsigned char red = redChannel[current_index];
unsigned char green = greenChannel[current_index];
unsigned char blue = blueChannel[current_index];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[current_index] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_filter,sizeof(float)* filterWidth * filterWidth));
checkCudaErrors(hipMemcpy(d_filter,h_filter,sizeof(float)* filterWidth * filterWidth,hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
double pixels_number=numRows*numCols;
int blocks_number=ceil(pixels_number/1024);
printf("Pixels number : %d \n",(int)pixels_number);
printf("Blocks number : %d \n",blocks_number);
const dim3 blockSize (1024,1,1) ;
const dim3 gridSize(blocks_number,1,1);
hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize),filterWidth*filterWidth*sizeof(float), 0, d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize),filterWidth*filterWidth*sizeof(float), 0, d_green, d_greenBlurred, numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize),filterWidth*filterWidth*sizeof(float), 0, d_blue, d_blueBlurred, numRows, numCols,
d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 66d9126422955eb0211d18f87a9e740f3b19c405.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include "math.h"
#include "stdio.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
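// Overview (descriptive comments added for clarity): one thread produces one
// output pixel, addressed through a flat 1-D index. The filter weights are first
// staged into dynamically sized shared memory by the first filterWidth*filterWidth
// threads, and after the barrier every thread walks the filter window around its
// pixel, clamping out-of-range neighbours to the image border, and writes the
// accumulated weighted sum to outputChannel.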
int numOfCols=numRows;
int numOfRows=numCols;
int pixels_number=numOfCols*numOfRows;
int block_size=blockDim.x;
int current_index = block_size* blockIdx.x + threadIdx.x;
int filter_size=filterWidth*filterWidth;
extern __shared__ float sh_filter[];
int num_filter_elements_per_thread=filter_size/block_size;
if(filter_size%block_size>0)
num_filter_elements_per_thread++;
if(threadIdx.x<filter_size){
for(int i=0;i<num_filter_elements_per_thread;i++){
int idx=threadIdx.x*num_filter_elements_per_thread+i;
if(idx>=filter_size)
continue;
sh_filter[idx]=filter[idx];
}
}
__syncthreads();
if(current_index >= pixels_number)
return;
int img_x=current_index/numOfRows;
int img_y=(current_index%numOfRows);
float sum=0.f;
int fcx=filterWidth/2;
int fcy=fcx;
for(int i=0;i<filter_size;i++){
int fx=i/filterWidth;
int fy=(i%filterWidth);
int Imx=fx-fcx+img_x;
int Imy=fy-fcy+img_y;
if(Imx>=numOfCols)
Imx=numOfCols-1;
if(Imy >=numOfRows)
Imy=numOfRows-1;
if(Imx<0)
Imx=0;
if(Imy<0)
Imy=0;
sum+= sh_filter[i]* inputChannel[Imx*numOfRows+Imy];
}
outputChannel[current_index]= sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int pixels_number=numRows*numCols;
int block_size=blockDim.x;
int current_index= block_size* blockIdx.x + threadIdx.x;
if(current_index>=pixels_number)
return;
redChannel[current_index]=inputImageRGBA[current_index].x;
greenChannel[current_index]=inputImageRGBA[current_index].y;
blueChannel[current_index]=inputImageRGBA[current_index].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
int pixels_number=numRows*numCols;
int block_size=blockDim.x;
int current_index= block_size* blockIdx.x + threadIdx.x;
if(current_index>=pixels_number)
return;
unsigned char red = redChannel[current_index];
unsigned char green = greenChannel[current_index];
unsigned char blue = blueChannel[current_index];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[current_index] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_filter,sizeof(float)* filterWidth * filterWidth));
checkCudaErrors(cudaMemcpy(d_filter,h_filter,sizeof(float)* filterWidth * filterWidth,cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
double pixels_number=numRows*numCols;
int blocks_number=ceil(pixels_number/1024);
printf("Pixels number : %d \n",(int)pixels_number);
printf("Blocks number : %d \n",blocks_number);
const dim3 blockSize (1024,1,1) ;
const dim3 gridSize(blocks_number,1,1);
separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize,filterWidth*filterWidth*sizeof(float)>>>(d_red, d_redBlurred, numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize,filterWidth*filterWidth*sizeof(float)>>>(d_green, d_greenBlurred, numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize,filterWidth*filterWidth*sizeof(float)>>>(d_blue, d_blueBlurred, numRows, numCols,
d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
6af519eac6ddd70494b4099db241a2d93285220d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <hip/hip_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA SDK samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
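// Worked example (added for clarity): for the 640x533 input images used below,
// 16x16 thread blocks would give iDivUp(640, 16) = 40 and iDivUp(533, 16) = 34
// (533/16 = 33.3 rounds up), so the last row of blocks only partially overlaps the
// image; the block size actually used here is whatever blockSize_x/blockSize_y
// define in stereoDisparity_kernel.cuh.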
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(hipMalloc((void **) &d_odata, memSize));
checkCudaErrors(hipMalloc((void **) &d_img0, memSize));
checkCudaErrors(hipMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(hipMemcpy(d_img0, h_img0, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_img1, h_img1, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_odata, memSize, hipMemcpyHostToDevice));
size_t offset = 0;
hipChannelFormatDesc ca_desc0 = hipCreateChannelDesc<unsigned int>();
hipChannelFormatDesc ca_desc1 = hipCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = hipAddressModeClamp;
tex2Dleft.addressMode[1] = hipAddressModeClamp;
tex2Dleft.filterMode = hipFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = hipAddressModeClamp;
tex2Dright.addressMode[1] = hipAddressModeClamp;
tex2Dright.filterMode = hipFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(hipBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(hipBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
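// Note (added for clarity): hipBindTexture2D can return a non-zero byte offset when
// the supplied pointer does not meet the device's texture alignment requirement;
// the asserts above rely on hipMalloc returning suitably aligned memory, since a
// non-zero offset would require shifting the texture coordinates inside the kernel.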
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// launch the stereoDisparity kernel
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(hipMemcpy(h_odata, d_odata, memSize, hipMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(hipFree(d_odata));
checkCudaErrors(hipFree(d_img0));
checkCudaErrors(hipFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
hipDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 6af519eac6ddd70494b4099db241a2d93285220d.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <cuda_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared that are common to CUDA SDK samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(cudaMalloc((void **) &d_odata, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img0, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(cudaMemcpy(d_img0, h_img0, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_img1, h_img1, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_odata, memSize, cudaMemcpyHostToDevice));
size_t offset = 0;
cudaChannelFormatDesc ca_desc0 = cudaCreateChannelDesc<unsigned int>();
cudaChannelFormatDesc ca_desc1 = cudaCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = cudaAddressModeClamp;
tex2Dleft.addressMode[1] = cudaAddressModeClamp;
tex2Dleft.filterMode = cudaFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = cudaAddressModeClamp;
tex2Dright.addressMode[1] = cudaAddressModeClamp;
tex2Dright.filterMode = cudaFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// launch the stereoDisparity kernel
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(cudaMemcpy(h_odata, d_odata, memSize, cudaMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(cudaFree(d_odata));
checkCudaErrors(cudaFree(d_img0));
checkCudaErrors(cudaFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
cudaDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
e144d08b28afb5e9091f17def995c19835a05912.hip | // !!! This is a file automatically generated by hipify!!!
#include "splat_render_cuda.hpp"
#include "depth_camera_cuda.hpp"
//#include <hip/hip_runtime.h>
#include <ftl/cuda_matrix_util.hpp>
#include "splat_params.hpp"
#include "mls_cuda.hpp"
#include <ftl/depth_camera.hpp>
#define T_PER_BLOCK 8
#define UPSAMPLE_FACTOR 1.8f
#define WARP_SIZE 32
#define DEPTH_THRESHOLD 0.05f
#define UPSAMPLE_MAX 60
#define MAX_ITERATIONS 32 // Note: Must be multiple of 32
#define SPATIAL_SMOOTHING 0.005f
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
extern __constant__ ftl::voxhash::DepthCameraCUDA c_cameras[MAX_CAMERAS];
__global__ void clearColourKernel(TextureObject<uchar4> colour) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < colour.width() && y < colour.height()) {
//depth(x,y) = 0x7f800000; //PINF;
colour(x,y) = make_uchar4(76,76,82,0);
}
}
__device__ inline bool isStable(const float3 &previous, const float3 &estimate, const SplatParams ¶ms, float d) {
const float psize = 2.0f * d / params.camera.fx;
//printf("PSIZE %f\n", psize);
return fabs(previous.x - estimate.x) <= psize &&
fabs(previous.y - estimate.y) <= psize &&
fabs(previous.z - estimate.z) <= psize;
}
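// In other words (descriptive comment added for clarity): the estimate is treated
// as stable once it has moved by less than psize = 2*d/fx in every axis, i.e.
// roughly the world-space width of two pixels of the virtual camera at depth d.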
// ===== PASS 1 : Gather & Upsample (Depth) ====================================
/*
* Pass 1: Directly render raw points from all cameras, but upsample the points
* if their spacing is within smoothing threshold but greater than their pixel
* size in the original image.
*/
__global__ void dibr_merge_upsample_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
// TODO: Don't upsample so much that only minimum depth makes it through
// Consider also using some SDF style approach to accumulate and smooth a
// depth value between points
const int upsample = min(UPSAMPLE_MAX-2, int(0.01 * params.camera.fx / camPos.z))+3;
const float interval = 1.0f / float(upsample / 2);
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = threadIdx.x % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
const float3 point = params.m_viewMatrix * ftl::cuda::upsampled_point(camera.points, make_float2(float(x)+float(u)*interval, float(y)+float(v)*interval));
const float d = point.z;
const uint2 screenPos = params.camera.cameraToKinectScreen(point);
const unsigned int cx = screenPos.x;//+u;
const unsigned int cy = screenPos.y;//+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
}
}
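// Thread mapping note: each source pixel is handled by a whole warp here (the
// x index divides the flat thread index by WARP_SIZE), so with the block shape
// used by ftl::cuda::dibr below (2*WARP_SIZE x T_PER_BLOCK) one block covers a
// 2 x T_PER_BLOCK patch of pixels while the 32 lanes of each warp stripe over
// the upsample*upsample splat positions of that pixel.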
/*
* Pass 1: Directly render each camera into virtual view but with no upsampling
* for sparse points.
*/
__global__ void dibr_merge_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
if (worldPos.x == MINF) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const float d = camPos.z;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const unsigned int cx = screenPos.x;
const unsigned int cy = screenPos.y;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
}
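// Depth is accumulated as integer millimetres so atomicMin can serve as the
// z-test: there is no native float atomicMin, and the float value d * 1000.0f
// is implicitly converted to int at the call. The depth buffer is presumably
// initialised to a large sentinel elsewhere before this pass runs.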
// ===== PASS 2 : Splat Visible Surface ========================================
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*/
__global__ void OLD_dibr_visibility_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = min(UPSAMPLE_MAX, int((r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x - upsample >= depth.width() || screenPos.y - upsample >= depth.height()) return;
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = threadIdx.x % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
float3 nearest = ftl::cuda::screen_centroid<1>(camera.points, make_float2(screenPos.x+u, screenPos.y+v), make_int2(x,y), params, upsample);
// Use current points z as estimate
//float3 nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,camPos.z);
// Or calculate upper and lower bounds for depth and do gradient
// descent until the gradient change is too small or max iter is reached
// and depth remains within the bounds.
// How to find min and max depths?
//float ld = nearest.z;
// TODO: (Nick) Estimate depth using points plane, but needs better normals.
//float t;
//if (ftl::cuda::intersectPlane(normal, worldPos, rayOrigin, rayDir, t)) {
// Plane based estimate of surface at this pixel
//const float3 nearest = rayOrigin + rayDir * camPos.z;
float3 output;
// Use MLS of camera neighbor points to get more exact estimate
// Iterate until pixel is stable on the surface.
for (int k=0; k<MAX_ITERATIONS; ++k) {
// TODO:(Nick) Should perhaps use points from all cameras?
// Instead of doing each camera separately...
// If the depth already is close then it has already been done and can skip this point
if (ftl::cuda::mls_point_surface<1>(camera.points, make_int2(x,y), params.m_viewMatrixInverse * nearest, output, SPATIAL_SMOOTHING) <= 0.0f) {
/*const unsigned int cx = screenPos.x;
const unsigned int cy = screenPos.y;
if (cx < depth.width() && cy < depth.height()) {
atomicMax(&depth(cx,cy), 10000.0f);
}*/
break;
}
//ftl::cuda::render_depth(depth, params, output);
output = params.m_viewMatrix * output;
// This is essentially the SDF function f(x), only the normal should be estimated also from the weights
//const float d = nearest.z + (normal.x*output.x + normal.y*output.y + normal.z*output.z);
const float d = nearest.z + copysignf(0.5f*length(output - nearest), output.z - nearest.z);
nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d);
const float2 sp = params.camera.cameraToKinectScreenFloat(output);
//if (isStable(nearest, output, params, d)) {
//if (fabs(sp.x - float(screenPos.x+u)) < 2.0f && fabs(sp.y - float(screenPos.y+v)) < 2.0f) {
if (length(output - nearest) <= 2.0f * params.camera.fx / camPos.z) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
break;
}
/*if (k >= MAX_ITERATIONS-1 && length(output - nearest) <= SPATIAL_SMOOTHING) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
//atomicMin(&depth(cx,cy), d * 1000.0f);
printf("ERR = %f, %f\n", fabs(sp.x - float(screenPos.x+u)), fabs(sp.y - float(screenPos.y+v)));
}
}*/
//nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d); // ld + (d - ld)*0.8f
//ld = d;
}
//}
}
}
// ------ Alternative for pass 2: principal surfaces ---------------------------
#define NEIGHBOR_RADIUS 1
#define MAX_NEIGHBORS ((NEIGHBOR_RADIUS*2+1)*(NEIGHBOR_RADIUS*2+1))
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*/
__global__ void dibr_visibility_principal_kernel(TextureObject<int> depth, int cam, SplatParams params) {
__shared__ float3 neighborhood_cache[2*T_PER_BLOCK][MAX_NEIGHBORS];
__shared__ int minimum[2*T_PER_BLOCK];
__shared__ int maximum[2*T_PER_BLOCK];
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int warp = threadIdx.x / WARP_SIZE + threadIdx.y*2;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = min(UPSAMPLE_MAX, int((4.0f*r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x - upsample >= depth.width() || screenPos.y - upsample >= depth.height()) return;
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// TODO:(Nick) Preload point neighbors and transform to eye
const int lane = threadIdx.x % WARP_SIZE;
if (lane == 0) {
minimum[warp] = 100000000;
maximum[warp] = -100000000;
}
__syncwarp();
for (int i=lane; i<MAX_NEIGHBORS; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS+1)) - NEIGHBOR_RADIUS;
const int v = (i / (2*NEIGHBOR_RADIUS+1)) - NEIGHBOR_RADIUS;
const float3 point = params.m_viewMatrix * make_float3(tex2D<float4>(camera.points, x+u, y+v));
neighborhood_cache[warp][i] = point;
if (length(point - camPos) <= 0.04f) {
atomicMin(&minimum[warp], point.z*1000.0f);
atomicMax(&maximum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float interval = (float(maximum[warp])/1000.0f - float(minimum[warp]) / 1000.0f) / float(MAX_ITERATIONS);
//if (y == 200) printf("interval: %f\n", interval);
// TODO:(Nick) Find min and max depths of neighbors to estimate z bounds
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
//float3 nearest = ftl::cuda::screen_centroid<1>(camera.points, make_float2(screenPos.x+u, screenPos.y+v), make_int2(x,y), params, upsample);
// Use current points z as estimate
// TODO: Use min point as estimate
float3 nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,float(minimum[warp])/1000.0f);
// Or calculate upper and lower bounds for depth and do gradient
// descent until the gradient change is too small or max iter is reached
// and depth remains within the bounds.
// How to find min and max depths?
// TODO: (Nick) Estimate depth using points plane, but needs better normals.
//float t;
//if (ftl::cuda::intersectPlane(normal, worldPos, rayOrigin, rayDir, t)) {
// Plane based estimate of surface at this pixel
//const float3 nearest = rayOrigin + rayDir * camPos.z;
// Use MLS of camera neighbor points to get more exact estimate
// Iterate until pixel is stable on the surface.
for (int k=0; k<MAX_ITERATIONS; ++k) {
// TODO:(Nick) Should perhaps use points from all cameras?
// Instead of doing each camera separately...
// If the depth already is close then it has already been done and can skip this point
const float energy = ftl::cuda::mls_point_energy<MAX_NEIGHBORS>(neighborhood_cache[warp], nearest, SPATIAL_SMOOTHING);
if (energy <= 0.0f) break;
//ftl::cuda::render_depth(depth, params, output);
// This is essentially the SDF function f(x), only the normal should be estimated also from the weights
//const float d = nearest.z + (normal.x*output.x + normal.y*output.y + normal.z*output.z);
const float d = nearest.z;
nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d+interval);
if (energy >= 0.1f) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
break;
}
}
//}
}
}
#define NEIGHBOR_RADIUS_2 3
#define NEIGHBOR_WINDOW ((NEIGHBOR_RADIUS_2*2+1)*(NEIGHBOR_RADIUS_2*2+1))
#define MAX_NEIGHBORS_2 32
#define FULL_MASK 0xffffffff
__device__ inline float warpMax(float e) {
for (int i = WARP_SIZE/2; i > 0; i /= 2) {
const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
e = max(e, other);
}
return e;
}
__device__ inline float warpMin(float e) {
for (int i = WARP_SIZE/2; i > 0; i /= 2) {
const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
e = min(e, other);
}
return e;
}
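// The same butterfly exchange generalises to any commutative warp reduction.
// Illustrative sketch only (not referenced elsewhere in this file): a
// warp-wide sum built on the identical __shfl_xor_sync pattern.
__device__ inline float warpSum(float e) {
    for (int i = WARP_SIZE/2; i > 0; i /= 2) {
        const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
        e += other;
    }
    return e;
}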
#define ENERGY_THRESHOLD 0.1f
#define SMOOTHING_MULTIPLIER_A 10.0f // For surface search
#define SMOOTHING_MULTIPLIER_B 4.0f // For z contribution
#define SMOOTHING_MULTIPLIER_C 4.0f // For colour contribution
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*
* This version uses a previous point render as neighbour source.
*/
__global__ void dibr_visibility_principal_kernel2(TextureObject<int> point_in, TextureObject<int> depth, SplatParams params) {
__shared__ float3 neighborhood_cache[2*T_PER_BLOCK][MAX_NEIGHBORS_2];
__shared__ int minimum[2*T_PER_BLOCK];
__shared__ int maximum[2*T_PER_BLOCK];
__shared__ unsigned int nidx[2*T_PER_BLOCK];
const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
const int warp = tid / WARP_SIZE; //threadIdx.x / WARP_SIZE + threadIdx.y*2;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// Starting point for surface minimum
float clusterBase = params.camera.m_sensorDepthWorldMin;
// Loop to a deeper surface if not on the first one selected...
while (clusterBase < params.camera.m_sensorDepthWorldMax) {
const int lane = tid % WARP_SIZE;
if (lane == 0) {
minimum[warp] = 100000000;
maximum[warp] = -100000000;
nidx[warp] = 0;
}
__syncwarp();
// Search for a valid minimum neighbour
// TODO: Should this really be minimum or the median of a depth cluster?
// cluster median seems very hard to calculate...
for (int i=lane; i<NEIGHBOR_WINDOW; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const int v = (i / (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const float3 point = params.camera.kinectDepthToSkeleton(x+u, y+v, float(point_in.tex2D(x+u, y+v)) / 1000.0f);
const float3 camPos = params.camera.kinectDepthToSkeleton(x, y, point.z);
// If it is close enough...
// TODO: smoothing / strength should be determined by a number of factors including:
// 1) Depth from original source
// 2) Colour contrast in underlying RGB
// 3) Estimated noise levels in depth values
if (point.z > clusterBase && point.z < params.camera.m_sensorDepthWorldMax && length(point - camPos) <= SMOOTHING_MULTIPLIER_A*(point.z / params.camera.fx)) {
atomicMin(&minimum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float minDepth = float(minimum[warp])/1000.0f;
// Preload valid neighbour points from within a window. A point is valid
// if it is within a specific distance of the minimum.
// Also calculate the maximum at the same time.
// TODO: Could here do a small search in each camera? This would allow all
// points to be considered, even those masked in our depth input.
const float3 minPos = params.camera.kinectDepthToSkeleton(x, y, minDepth);
for (int i=lane; i<NEIGHBOR_WINDOW; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const int v = (i / (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const float3 point = params.camera.kinectDepthToSkeleton(x+u, y+v, float(point_in.tex2D(x+u, y+v)) / 1000.0f);
// If it is close enough...
if (point.z > params.camera.m_sensorDepthWorldMin && point.z < params.camera.m_sensorDepthWorldMax && length(point - minPos) <= SMOOTHING_MULTIPLIER_A*(point.z / params.camera.fx)) {
// Append to neighbour list
//unsigned int idx = atomicInc(&nidx[warp], MAX_NEIGHBORS_2-1);
unsigned int idx = atomicAdd(&nidx[warp], 1);
if (idx >= MAX_NEIGHBORS_2) break;
neighborhood_cache[warp][idx] = point;
atomicMax(&maximum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float maxDepth = float(maximum[warp])/1000.0f;
const float interval = (maxDepth - minDepth) / float(MAX_ITERATIONS);
if (minDepth >= params.camera.m_sensorDepthWorldMax) return;
if (maxDepth <= params.camera.m_sensorDepthWorldMin) return;
//if (y == 200) printf("interval: %f\n", maxDepth);
// If all samples say same depth, then agree and return
// TODO: Check this is valid, since small energies should be removed...
/*if (fabs(minDepth - maxDepth) < 0.0001f) {
if (lane == 0) {
const unsigned int cx = x;
const unsigned int cy = y;
if (minDepth < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), minDepth * 1000.0f);
}
}
return;
}*/
float maxenergy = -1.0f;
float bestdepth = 0.0f;
// Search for best or threshold energy
for (int k=lane; k<MAX_ITERATIONS; k+=WARP_SIZE) {
const float3 nearest = params.camera.kinectDepthToSkeleton(x,y,minDepth+float(k)*interval);
const float myenergy = ftl::cuda::mls_point_energy<MAX_NEIGHBORS_2>(neighborhood_cache[warp], nearest, min(nidx[warp], MAX_NEIGHBORS_2), SMOOTHING_MULTIPLIER_B*(nearest.z/params.camera.fx));
const float newenergy = warpMax(max(myenergy, maxenergy));
bestdepth = (myenergy == newenergy) ? nearest.z : (newenergy > maxenergy) ? 0.0f : bestdepth;
maxenergy = newenergy;
}
// If enough energy was found and this thread was the one that found the best
// then output the depth that this energy occurred at.
if (bestdepth > 0.0f && maxenergy >= ENERGY_THRESHOLD) {
//printf("E D %f %f\n", maxenergy, bestdepth);
const unsigned int cx = x;
const unsigned int cy = y;
if (bestdepth > params.camera.m_sensorDepthWorldMin && bestdepth < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), bestdepth * 1000.0f);
//depth(cx,cy) = bestdepth * 1000.0f;
}
}
// TODO: Could the threshold depend upon the number of points? Fewer points
// due to distance is incorrect since really there may not be fewer points
// Perhaps the best option is to make it depend on depth ... really close
// and really far both have lower thresholds due to point densities. Other
// option is smoothing factor and surface distances alter with distance to
// vary the number of points used ... smoothing factor could be a multiple
// of pixel size at given distance. Density from original source is also
// an influencer of smoothing factor and thresholds. Colour contrast also
// has a weighting influence, high contrast is high certainty in the
// disparity so such points should have a high influence over choice of
// surface location.
//
// Magnitude vs dispersion factor in the energy function ...
// * Mag is certainty of surface location
// * Dispersion is how far to propagate that certainty,
if (maxenergy >= ENERGY_THRESHOLD) return;
// Move to next possible surface...
clusterBase = minDepth + SMOOTHING_MULTIPLIER_B*(minDepth / params.camera.fx);
};
}
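// Recap of the warp-cooperative search above: (1) scan the NEIGHBOR_WINDOW of
// the previous point render for the shallowest depth beyond clusterBase,
// (2) cache up to MAX_NEIGHBORS_2 points within the smoothing radius of that
// minimum and track the deepest of them, (3) evaluate the MLS point energy at
// MAX_ITERATIONS depths spread evenly between the two bounds, one candidate
// per lane, and (4) reduce with warpMax so the depth of the strongest energy
// is written via atomicMin when it clears ENERGY_THRESHOLD; otherwise the
// whole search restarts from a deeper clusterBase.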
// ===== Pass 2 and 3 : Attribute contributions ================================
__device__ inline float4 make_float4(const uchar4 &c) {
return make_float4(c.x,c.y,c.z,c.w);
}
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
__global__ void dibr_attribute_contrib_kernel(
TextureObject<int> depth_in,
TextureObject<float4> colour_out,
TextureObject<float4> normal_out,
TextureObject<float> contrib_out, int cam,
SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
//const int warp = tid / WARP_SIZE;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = 8; //min(UPSAMPLE_MAX, int((5.0f*r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
// Is this point near the actual surface and therefore a contributor?
const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
//if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
// TODO:(Nick) Should just one thread load these to shared mem?
const float4 colour = make_float4(tex2D<uchar4>(camera.colour, x, y));
const float4 normal = tex2D<float4>(camera.normal, x, y);
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = tid % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Use the depth buffer to determine this pixel's 3D position in camera space
const float d = ((float)depth_in.tex2D(screenPos.x+u, screenPos.y+v)/1000.0f);
const float3 nearest = params.camera.kinectDepthToSkeleton((int)(screenPos.x+u),(int)(screenPos.y+v),d);
// What is the contribution of our current point at this pixel?
const float weight = ftl::cuda::spatialWeighting(length(nearest - camPos), SMOOTHING_MULTIPLIER_C*(nearest.z/params.camera.fx));
if (screenPos.x+u < colour_out.width() && screenPos.y+v < colour_out.height() && weight > 0.0f) { // TODO: Use confidence threshold here
const float4 wcolour = colour * weight;
const float4 wnormal = normal * weight;
//printf("Z %f\n", d);
// Add this points contribution to the pixel buffer
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v), wcolour.x);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+1, wcolour.y);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+2, wcolour.z);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+3, wcolour.w);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v), wnormal.x);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+1, wnormal.y);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+2, wnormal.z);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+3, wnormal.w);
atomicAdd(&contrib_out(screenPos.x+u, screenPos.y+v), weight);
}
}
}
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
/*__global__ void dibr_attribute_contrib_kernel(
TextureObject<int> depth_in,
TextureObject<uchar4> colour_out,
TextureObject<float4> normal_out, int numcams, SplatParams params) {
const int i = threadIdx.y*blockDim.y + threadIdx.x;
const int bx = blockIdx.x*blockDim.x;
const int by = blockIdx.y*blockDim.y;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
for (int j=0; j<numcams; ++j) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[j];
float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
float r = (camera.poseInverse * worldPos).z;
//if (ftl::cuda::mls_point_surface<3>(camera.points, make_int2(x,y), worldPos, 0.02f) < 0.001f) continue;
if (worldPos.x == MINF) continue;
const float3 camPos = params.m_viewMatrix * worldPos;
// Estimate upsample factor using ratio of source depth and output depth
const int upsample = min(15, (int)(UPSAMPLE_FACTOR * (r / camPos.z))+1);
const float upfactor = 2.0f / (float)(upsample);
for (int v=0; v<upsample; ++v) {
for (int u=0; u<upsample; ++u) {
float3 point;
const ftl::cuda::fragment nearest = ftl::cuda::upsampled_point(camera.points, camera.normal, camera.colour,
make_float2((float)x-1.0f+u*upfactor,(float)y-1.0f+v*upfactor));
//if (ftl::cuda::mls_point_surface<3>(camera.points, make_int2(x,y), nearest, point, 0.02f) < 0.001f) continue;
ftl::cuda::render_fragment(depth_in, normal_out, colour_out, params, nearest);
}
}
}
}*/
__global__ void dibr_normalise_kernel(
TextureObject<float4> colour_in,
TextureObject<uchar4> colour_out,
TextureObject<float4> normals,
TextureObject<float> contribs) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < colour_in.width() && y < colour_in.height()) {
const float4 colour = colour_in.tex2D((int)x,(int)y);
const float4 normal = normals.tex2D((int)x,(int)y);
const float contrib = contribs.tex2D((int)x,(int)y);
if (contrib > 0.0f) {
colour_out(x,y) = make_uchar4(colour.x / contrib, colour.y / contrib, colour.z / contrib, 0);
normals(x,y) = normal / contrib;
}
}
}
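// Net effect of the contribution + normalise passes: each output pixel holds a
// weighted mean, colour(x,y) = sum_i(w_i * c_i) / sum_i(w_i) (and likewise for
// the normals), where w_i is the spatial splat weight accumulated into
// contrib_out by dibr_attribute_contrib_kernel. Pixels that received no
// contribution keep the background colour written by clearColourKernel.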
void ftl::cuda::dibr(const TextureObject<int> &depth_out,
const TextureObject<uchar4> &colour_out,
const TextureObject<float4> &normal_out,
const TextureObject<float> &confidence_out,
const TextureObject<float4> &tmp_colour,
const TextureObject<int> &tmp_depth,
int numcams,
const SplatParams ¶ms,
hipStream_t stream) {
const dim3 sgridSize((depth_out.width() + 2 - 1)/2, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 sblockSize(2*WARP_SIZE, T_PER_BLOCK);
const dim3 gridSize((depth_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( clearColourKernel), dim3(gridSize), dim3(blockSize), 0, stream, colour_out);
ftl::cuda::clear_to_zero(confidence_out, stream);
ftl::cuda::clear_colour(tmp_colour, stream);
ftl::cuda::clear_colour(normal_out, stream);
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
//int i=3;
bool noSplatting = params.m_flags & ftl::render::kNoSplatting;
// Pass 1, gather and upsample depth maps
if (params.m_flags & ftl::render::kNoUpsampling) {
for (int i=0; i<numcams; ++i)
hipLaunchKernelGGL(( dibr_merge_kernel), dim3(gridSize), dim3(blockSize), 0, stream, (noSplatting) ? depth_out : tmp_depth, i, params);
} else {
for (int i=0; i<numcams; ++i)
hipLaunchKernelGGL(( dibr_merge_upsample_kernel), dim3(sgridSize), dim3(sblockSize), 0, stream, (noSplatting) ? depth_out : tmp_depth, i, params);
}
if (noSplatting) {
// Pass 3, accumulate all point contributions to pixels
for (int i=0; i<numcams; ++i)
hipLaunchKernelGGL(( dibr_attribute_contrib_kernel), dim3(sgridSize), dim3(sblockSize), 0, stream, depth_out, tmp_colour, normal_out, confidence_out, i, params);
} else {
// Pass 2
hipLaunchKernelGGL(( dibr_visibility_principal_kernel2), dim3(sgridSize), dim3(sblockSize), 0, stream, tmp_depth, depth_out, params);
// Pass 3, accumulate all point contributions to pixels
for (int i=0; i<numcams; ++i)
hipLaunchKernelGGL(( dibr_attribute_contrib_kernel), dim3(sgridSize), dim3(sblockSize), 0, stream, depth_out, tmp_colour, normal_out, confidence_out, i, params);
}
// Pass 2
//dibr_visibility_principal_kernel2<<<sgridSize, sblockSize, 0, stream>>>(tmp_depth, depth_out, params);
// Pass 2, merge a depth map from each camera.
//for (int i=0; i<numcams; ++i)
// dibr_visibility_principal_kernel<<<sgridSize, sblockSize, 0, stream>>>(depth_out, i, params);
// Pass 4, normalise contributions
hipLaunchKernelGGL(( dibr_normalise_kernel), dim3(gridSize), dim3(blockSize), 0, stream, tmp_colour, colour_out, normal_out, confidence_out);
cudaSafeCall( hipGetLastError() );
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
}
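// Minimal host-side sketch of driving this entry point once per frame. The
// buffer setup and the exact SplatParams fields depend on the rest of the ftl
// renderer, so the names below (pose, pose_inverse, virtual_cam, stream) are
// placeholders, not part of this file's API:
//
//   SplatParams params;
//   params.m_flags = 0;
//   params.m_viewMatrix = pose_inverse;   // world -> virtual camera
//   params.m_viewMatrixInverse = pose;
//   params.camera = virtual_cam;          // intrinsics of the virtual view
//   ftl::cuda::dibr(depth_out, colour_out, normal_out, confidence_out,
//                   tmp_colour, tmp_depth, num_cameras, params, stream);
//   hipStreamSynchronize(stream);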
void ftl::cuda::dibr_raw(const TextureObject<int> &depth_out,
int numcams, const SplatParams ¶ms, hipStream_t stream) {
const dim3 gridSize((depth_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
//dibr_depthmap_direct_kernel<<<gridSize, blockSize, 0, stream>>>(depth_out, numcams, params);
cudaSafeCall( hipGetLastError() );
#ifdef _DEBUG
cudaSafeCall(hipDeviceSynchronize());
#endif
}
| e144d08b28afb5e9091f17def995c19835a05912.cu | #include "splat_render_cuda.hpp"
#include "depth_camera_cuda.hpp"
//#include <cuda_runtime.h>
#include <ftl/cuda_matrix_util.hpp>
#include "splat_params.hpp"
#include "mls_cuda.hpp"
#include <ftl/depth_camera.hpp>
#define T_PER_BLOCK 8
#define UPSAMPLE_FACTOR 1.8f
#define WARP_SIZE 32
#define DEPTH_THRESHOLD 0.05f
#define UPSAMPLE_MAX 60
#define MAX_ITERATIONS 32 // Note: Must be multiple of 32
#define SPATIAL_SMOOTHING 0.005f
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
extern __constant__ ftl::voxhash::DepthCameraCUDA c_cameras[MAX_CAMERAS];
__global__ void clearColourKernel(TextureObject<uchar4> colour) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < colour.width() && y < colour.height()) {
//depth(x,y) = 0x7f800000; //PINF;
colour(x,y) = make_uchar4(76,76,82,0);
}
}
__device__ inline bool isStable(const float3 &previous, const float3 &estimate, const SplatParams ¶ms, float d) {
const float psize = 2.0f * d / params.camera.fx;
//printf("PSIZE %f\n", psize);
return fabs(previous.x - estimate.x) <= psize &&
fabs(previous.y - estimate.y) <= psize &&
fabs(previous.z - estimate.z) <= psize;
}
// ===== PASS 1 : Gather & Upsample (Depth) ====================================
/*
* Pass 1: Directly render raw points from all cameras, but upsample the points
* if their spacing is within smoothing threshold but greater than their pixel
* size in the original image.
*/
__global__ void dibr_merge_upsample_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
// TODO: Don't upsample so much that only minimum depth makes it through
// Consider also using some SDF style approach to accumulate and smooth a
// depth value between points
const int upsample = min(UPSAMPLE_MAX-2, int(0.01 * params.camera.fx / camPos.z))+3;
const float interval = 1.0f / float(upsample / 2);
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = threadIdx.x % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
const float3 point = params.m_viewMatrix * ftl::cuda::upsampled_point(camera.points, make_float2(float(x)+float(u)*interval, float(y)+float(v)*interval));
const float d = point.z;
const uint2 screenPos = params.camera.cameraToKinectScreen(point);
const unsigned int cx = screenPos.x;//+u;
const unsigned int cy = screenPos.y;//+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
}
}
/*
* Pass 1: Directly render each camera into virtual view but with no upsampling
* for sparse points.
*/
__global__ void dibr_merge_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
if (worldPos.x == MINF) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const float d = camPos.z;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const unsigned int cx = screenPos.x;
const unsigned int cy = screenPos.y;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
}
// ===== PASS 2 : Splat Visible Surface ========================================
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*/
__global__ void OLD_dibr_visibility_kernel(TextureObject<int> depth, int cam, SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = min(UPSAMPLE_MAX, int((r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x - upsample >= depth.width() || screenPos.y - upsample >= depth.height()) return;
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = threadIdx.x % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
float3 nearest = ftl::cuda::screen_centroid<1>(camera.points, make_float2(screenPos.x+u, screenPos.y+v), make_int2(x,y), params, upsample);
// Use current points z as estimate
//float3 nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,camPos.z);
// Or calculate upper and lower bounds for depth and do gradient
// descent until the gradient change is too small or max iter is reached
// and depth remains within the bounds.
// How to find min and max depths?
//float ld = nearest.z;
// TODO: (Nick) Estimate depth using points plane, but needs better normals.
//float t;
//if (ftl::cuda::intersectPlane(normal, worldPos, rayOrigin, rayDir, t)) {
// Plane based estimate of surface at this pixel
//const float3 nearest = rayOrigin + rayDir * camPos.z;
float3 output;
// Use MLS of camera neighbor points to get more exact estimate
// Iterate until pixel is stable on the surface.
for (int k=0; k<MAX_ITERATIONS; ++k) {
// TODO:(Nick) Should perhaps use points from all cameras?
// Instead of doing each camera separately...
// If the depth already is close then it has already been done and can skip this point
if (ftl::cuda::mls_point_surface<1>(camera.points, make_int2(x,y), params.m_viewMatrixInverse * nearest, output, SPATIAL_SMOOTHING) <= 0.0f) {
/*const unsigned int cx = screenPos.x;
const unsigned int cy = screenPos.y;
if (cx < depth.width() && cy < depth.height()) {
atomicMax(&depth(cx,cy), 10000.0f);
}*/
break;
}
//ftl::cuda::render_depth(depth, params, output);
output = params.m_viewMatrix * output;
// This is essentially the SDF function f(x), only the normal should be estimated also from the weights
//const float d = nearest.z + (normal.x*output.x + normal.y*output.y + normal.z*output.z);
const float d = nearest.z + copysignf(0.5f*length(output - nearest), output.z - nearest.z);
nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d);
const float2 sp = params.camera.cameraToKinectScreenFloat(output);
//if (isStable(nearest, output, params, d)) {
//if (fabs(sp.x - float(screenPos.x+u)) < 2.0f && fabs(sp.y - float(screenPos.y+v)) < 2.0f) {
if (length(output - nearest) <= 2.0f * params.camera.fx / camPos.z) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
break;
}
/*if (k >= MAX_ITERATIONS-1 && length(output - nearest) <= SPATIAL_SMOOTHING) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
//atomicMin(&depth(cx,cy), d * 1000.0f);
printf("ERR = %f, %f\n", fabs(sp.x - float(screenPos.x+u)), fabs(sp.y - float(screenPos.y+v)));
}
}*/
//nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d); // ld + (d - ld)*0.8f
//ld = d;
}
//}
}
}
// ------ Alternative for pass 2: principal surfaces ---------------------------
#define NEIGHBOR_RADIUS 1
#define MAX_NEIGHBORS ((NEIGHBOR_RADIUS*2+1)*(NEIGHBOR_RADIUS*2+1))
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*/
__global__ void dibr_visibility_principal_kernel(TextureObject<int> depth, int cam, SplatParams params) {
__shared__ float3 neighborhood_cache[2*T_PER_BLOCK][MAX_NEIGHBORS];
__shared__ int minimum[2*T_PER_BLOCK];
__shared__ int maximum[2*T_PER_BLOCK];
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int warp = threadIdx.x / WARP_SIZE + threadIdx.y*2;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
// Get virtual camera ray for splat centre and backface cull if possible
//const float3 rayOrigin = params.m_viewMatrixInverse * make_float3(0.0f,0.0f,0.0f);
//const float3 rayDir = normalize(params.m_viewMatrixInverse * params.camera.kinectDepthToSkeleton(x,y,1.0f) - rayOrigin);
//if (dot(rayDir, normal) > 0.0f) return;
// Find the virtual screen position of current point
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = min(UPSAMPLE_MAX, int((4.0f*r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x - upsample >= depth.width() || screenPos.y - upsample >= depth.height()) return;
// TODO:(Nick) Check depth buffer and don't do anything if already hidden?
// TODO:(Nick) Preload point neighbors and transform to eye
const int lane = threadIdx.x % WARP_SIZE;
if (lane == 0) {
minimum[warp] = 100000000;
maximum[warp] = -100000000;
}
__syncwarp();
for (int i=lane; i<MAX_NEIGHBORS; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS+1)) - NEIGHBOR_RADIUS;
const int v = (i / (2*NEIGHBOR_RADIUS+1)) - NEIGHBOR_RADIUS;
const float3 point = params.m_viewMatrix * make_float3(tex2D<float4>(camera.points, x+u, y+v));
neighborhood_cache[warp][i] = point;
if (length(point - camPos) <= 0.04f) {
atomicMin(&minimum[warp], point.z*1000.0f);
atomicMax(&maximum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float interval = (float(maximum[warp])/1000.0f - float(minimum[warp]) / 1000.0f) / float(MAX_ITERATIONS);
//if (y == 200) printf("interval: %f\n", interval);
// TODO:(Nick) Find min and max depths of neighbors to estimate z bounds
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Make an initial estimate of the point's location
// Use centroid depth as estimate...?
//float3 nearest = ftl::cuda::screen_centroid<1>(camera.points, make_float2(screenPos.x+u, screenPos.y+v), make_int2(x,y), params, upsample);
// Use current points z as estimate
// TODO: Use min point as estimate
float3 nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,float(minimum[warp])/1000.0f);
// Or calculate upper and lower bounds for depth and do gradient
// descent until the gradient change is too small or max iter is reached
// and depth remains within the bounds.
// How to find min and max depths?
// TODO: (Nick) Estimate depth using points plane, but needs better normals.
//float t;
//if (ftl::cuda::intersectPlane(normal, worldPos, rayOrigin, rayDir, t)) {
// Plane based estimate of surface at this pixel
//const float3 nearest = rayOrigin + rayDir * camPos.z;
// Use MLS of camera neighbor points to get more exact estimate
// Iterate until pixel is stable on the surface.
for (int k=0; k<MAX_ITERATIONS; ++k) {
// TODO:(Nick) Should perhaps use points from all cameras?
// Instead of doing each camera separately...
// If the depth already is close then it has already been done and can skip this point
const float energy = ftl::cuda::mls_point_energy<MAX_NEIGHBORS>(neighborhood_cache[warp], nearest, SPATIAL_SMOOTHING);
if (energy <= 0.0f) break;
//ftl::cuda::render_depth(depth, params, output);
// This is essentially the SDF function f(x), only the normal should be estimated also from the weights
//const float d = nearest.z + (normal.x*output.x + normal.y*output.y + normal.z*output.z);
const float d = nearest.z;
nearest = params.camera.kinectDepthToSkeleton(screenPos.x+u,screenPos.y+v,d+interval);
if (energy >= 0.1f) {
const unsigned int cx = screenPos.x+u;
const unsigned int cy = screenPos.y+v;
if (d > params.camera.m_sensorDepthWorldMin && d < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), d * 1000.0f);
}
break;
}
}
//}
}
}
#define NEIGHBOR_RADIUS_2 3
#define NEIGHBOR_WINDOW ((NEIGHBOR_RADIUS_2*2+1)*(NEIGHBOR_RADIUS_2*2+1))
#define MAX_NEIGHBORS_2 32
#define FULL_MASK 0xffffffff
__device__ inline float warpMax(float e) {
for (int i = WARP_SIZE/2; i > 0; i /= 2) {
const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
e = max(e, other);
}
return e;
}
__device__ inline float warpMin(float e) {
for (int i = WARP_SIZE/2; i > 0; i /= 2) {
const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
e = min(e, other);
}
return e;
}
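// The same butterfly exchange generalises to any commutative warp reduction.
// Illustrative sketch only (not referenced elsewhere in this file): a
// warp-wide sum built on the identical __shfl_xor_sync pattern.
__device__ inline float warpSum(float e) {
    for (int i = WARP_SIZE/2; i > 0; i /= 2) {
        const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
        e += other;
    }
    return e;
}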
#define ENERGY_THRESHOLD 0.1f
#define SMOOTHING_MULTIPLIER_A 10.0f // For surface search
#define SMOOTHING_MULTIPLIER_B 4.0f // For z contribution
#define SMOOTHING_MULTIPLIER_C 4.0f // For colour contribution
/*
* Pass 2: Determine depth buffer with enough accuracy for a visibility test in pass 2.
* These values are also used as the actual surface estimate during rendering so should
* at least be plane or sphere fitted if not MLS smoothed onto the actual surface.
*
* This version uses a previous point render as neighbour source.
*/
__global__ void dibr_visibility_principal_kernel2(TextureObject<int> point_in, TextureObject<int> depth, SplatParams params) {
__shared__ float3 neighborhood_cache[2*T_PER_BLOCK][MAX_NEIGHBORS_2];
__shared__ int minimum[2*T_PER_BLOCK];
__shared__ int maximum[2*T_PER_BLOCK];
__shared__ unsigned int nidx[2*T_PER_BLOCK];
const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
const int warp = tid / WARP_SIZE; //threadIdx.x / WARP_SIZE + threadIdx.y*2;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// Starting point for surface minimum
float clusterBase = params.camera.m_sensorDepthWorldMin;
// Loop to a deeper surface if not on the first one selected...
while (clusterBase < params.camera.m_sensorDepthWorldMax) {
const int lane = tid % WARP_SIZE;
if (lane == 0) {
minimum[warp] = 100000000;
maximum[warp] = -100000000;
nidx[warp] = 0;
}
__syncwarp();
// Search for a valid minimum neighbour
// TODO: Should this really be minimum or the median of a depth cluster?
// cluster median seems very hard to calculate...
for (int i=lane; i<NEIGHBOR_WINDOW; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const int v = (i / (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const float3 point = params.camera.kinectDepthToSkeleton(x+u, y+v, float(point_in.tex2D(x+u, y+v)) / 1000.0f);
const float3 camPos = params.camera.kinectDepthToSkeleton(x, y, point.z);
// If it is close enough...
// TODO: smoothing / strength should be determined by a number of factors including:
// 1) Depth from original source
// 2) Colour contrast in underlying RGB
// 3) Estimated noise levels in depth values
if (point.z > clusterBase && point.z < params.camera.m_sensorDepthWorldMax && length(point - camPos) <= SMOOTHING_MULTIPLIER_A*(point.z / params.camera.fx)) {
atomicMin(&minimum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float minDepth = float(minimum[warp])/1000.0f;
// Preload valid neighbour points from within a window. A point is valid
// if it is within a specific distance of the minimum.
// Also calculate the maximum at the same time.
// TODO: Could here do a small search in each camera? This would allow all
// points to be considered, even those masked in our depth input.
const float3 minPos = params.camera.kinectDepthToSkeleton(x, y, minDepth);
for (int i=lane; i<NEIGHBOR_WINDOW; i+=WARP_SIZE) {
const int u = (i % (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const int v = (i / (2*NEIGHBOR_RADIUS_2+1)) - NEIGHBOR_RADIUS_2;
const float3 point = params.camera.kinectDepthToSkeleton(x+u, y+v, float(point_in.tex2D(x+u, y+v)) / 1000.0f);
// If it is close enough...
if (point.z > params.camera.m_sensorDepthWorldMin && point.z < params.camera.m_sensorDepthWorldMax && length(point - minPos) <= SMOOTHING_MULTIPLIER_A*(point.z / params.camera.fx)) {
// Append to neighbour list
//unsigned int idx = atomicInc(&nidx[warp], MAX_NEIGHBORS_2-1);
unsigned int idx = atomicAdd(&nidx[warp], 1);
if (idx >= MAX_NEIGHBORS_2) break;
neighborhood_cache[warp][idx] = point;
atomicMax(&maximum[warp], point.z*1000.0f);
}
}
__syncwarp();
const float maxDepth = float(maximum[warp])/1000.0f;
const float interval = (maxDepth - minDepth) / float(MAX_ITERATIONS);
if (minDepth >= params.camera.m_sensorDepthWorldMax) return;
if (maxDepth <= params.camera.m_sensorDepthWorldMin) return;
//if (y == 200) printf("interval: %f\n", maxDepth);
// If all samples say same depth, then agree and return
// TODO: Check this is valid, since small energies should be removed...
/*if (fabs(minDepth - maxDepth) < 0.0001f) {
if (lane == 0) {
const unsigned int cx = x;
const unsigned int cy = y;
if (minDepth < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), minDepth * 1000.0f);
}
}
return;
}*/
float maxenergy = -1.0f;
float bestdepth = 0.0f;
// Search for best or threshold energy
for (int k=lane; k<MAX_ITERATIONS; k+=WARP_SIZE) {
const float3 nearest = params.camera.kinectDepthToSkeleton(x,y,minDepth+float(k)*interval);
const float myenergy = ftl::cuda::mls_point_energy<MAX_NEIGHBORS_2>(neighborhood_cache[warp], nearest, min(nidx[warp], MAX_NEIGHBORS_2), SMOOTHING_MULTIPLIER_B*(nearest.z/params.camera.fx));
const float newenergy = warpMax(max(myenergy, maxenergy));
bestdepth = (myenergy == newenergy) ? nearest.z : (newenergy > maxenergy) ? 0.0f : bestdepth;
maxenergy = newenergy;
}
// If enough energy was found and this thread was the one that found the best
// then output the depth that this energy occurred at.
if (bestdepth > 0.0f && maxenergy >= ENERGY_THRESHOLD) {
//printf("E D %f %f\n", maxenergy, bestdepth);
const unsigned int cx = x;
const unsigned int cy = y;
if (bestdepth > params.camera.m_sensorDepthWorldMin && bestdepth < params.camera.m_sensorDepthWorldMax && cx < depth.width() && cy < depth.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth(cx,cy), bestdepth * 1000.0f);
//depth(cx,cy) = bestdepth * 1000.0f;
}
}
// TODO: Could the threshold depend upon the number of points? Fewer points
// due to distance is incorrect since really there may not be fewer points
// Perhaps the best option is to make it depend on depth ... really close
// and really far both have lower thresholds due to point densities. Other
// option is smoothing factor and surface distances alter with distance to
// vary the number of points used ... smoothing factor could be a multiple
// of pixel size at given distance. Density from original source is also
// an influencer of smoothing factor and thresholds. Colour contrast also
// has a weighting influence, high contrast is high certainty in the
// disparity so such points should have a high influence over choice of
// surface location.
//
// Magnitude vs dispersion factor in the energy function ...
// * Mag is certainty of surface location
// * Dispersion is how far to propagate that certainty,
if (maxenergy >= ENERGY_THRESHOLD) return;
// Move to next possible surface...
clusterBase = minDepth + SMOOTHING_MULTIPLIER_B*(minDepth / params.camera.fx);
};
}
// ===== Pass 2 and 3 : Attribute contributions ================================
__device__ inline float4 make_float4(const uchar4 &c) {
return make_float4(c.x,c.y,c.z,c.w);
}
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
__global__ void dibr_attribute_contrib_kernel(
TextureObject<int> depth_in,
TextureObject<float4> colour_out,
TextureObject<float4> normal_out,
TextureObject<float> contrib_out, int cam,
SplatParams params) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
//const int warp = tid / WARP_SIZE;
const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
if (worldPos.x == MINF) return;
const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
const float3 camPos = params.m_viewMatrix * worldPos;
if (camPos.z < params.camera.m_sensorDepthWorldMin) return;
if (camPos.z > params.camera.m_sensorDepthWorldMax) return;
const uint2 screenPos = params.camera.cameraToKinectScreen(camPos);
const int upsample = 8; //min(UPSAMPLE_MAX, int((5.0f*r) * params.camera.fx / camPos.z));
// Not on screen so stop now...
if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
// Is this point near the actual surface and therefore a contributor?
const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
//if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
// TODO:(Nick) Should just one thread load these to shared mem?
const float4 colour = make_float4(tex2D<uchar4>(camera.colour, x, y));
const float4 normal = tex2D<float4>(camera.normal, x, y);
// Each thread in warp takes an upsample point and updates corresponding depth buffer.
const int lane = tid % WARP_SIZE;
for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
const float u = (i % upsample) - (upsample / 2);
const float v = (i / upsample) - (upsample / 2);
// Use the depth buffer to determine this pixel's 3D position in camera space
const float d = ((float)depth_in.tex2D(screenPos.x+u, screenPos.y+v)/1000.0f);
const float3 nearest = params.camera.kinectDepthToSkeleton((int)(screenPos.x+u),(int)(screenPos.y+v),d);
// What is the contribution of our current point at this pixel?
const float weight = ftl::cuda::spatialWeighting(length(nearest - camPos), SMOOTHING_MULTIPLIER_C*(nearest.z/params.camera.fx));
if (screenPos.x+u < colour_out.width() && screenPos.y+v < colour_out.height() && weight > 0.0f) { // TODO: Use confidence threshold here
const float4 wcolour = colour * weight;
const float4 wnormal = normal * weight;
//printf("Z %f\n", d);
// Add this points contribution to the pixel buffer
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v), wcolour.x);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+1, wcolour.y);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+2, wcolour.z);
atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+3, wcolour.w);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v), wnormal.x);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+1, wnormal.y);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+2, wnormal.z);
atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+3, wnormal.w);
atomicAdd(&contrib_out(screenPos.x+u, screenPos.y+v), weight);
}
}
}
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
/*__global__ void dibr_attribute_contrib_kernel(
TextureObject<int> depth_in,
TextureObject<uchar4> colour_out,
TextureObject<float4> normal_out, int numcams, SplatParams params) {
const int i = threadIdx.y*blockDim.y + threadIdx.x;
const int bx = blockIdx.x*blockDim.x;
const int by = blockIdx.y*blockDim.y;
const int x = bx + threadIdx.x;
const int y = by + threadIdx.y;
for (int j=0; j<numcams; ++j) {
const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[j];
float3 worldPos = make_float3(tex2D<float4>(camera.points, x, y));
float r = (camera.poseInverse * worldPos).z;
//if (ftl::cuda::mls_point_surface<3>(camera.points, make_int2(x,y), worldPos, 0.02f) < 0.001f) continue;
if (worldPos.x == MINF) continue;
const float3 camPos = params.m_viewMatrix * worldPos;
// Estimate upsample factor using ratio of source depth and output depth
const int upsample = min(15, (int)(UPSAMPLE_FACTOR * (r / camPos.z))+1);
const float upfactor = 2.0f / (float)(upsample);
for (int v=0; v<upsample; ++v) {
for (int u=0; u<upsample; ++u) {
float3 point;
const ftl::cuda::fragment nearest = ftl::cuda::upsampled_point(camera.points, camera.normal, camera.colour,
make_float2((float)x-1.0f+u*upfactor,(float)y-1.0f+v*upfactor));
//if (ftl::cuda::mls_point_surface<3>(camera.points, make_int2(x,y), nearest, point, 0.02f) < 0.001f) continue;
ftl::cuda::render_fragment(depth_in, normal_out, colour_out, params, nearest);
}
}
}
}*/
__global__ void dibr_normalise_kernel(
TextureObject<float4> colour_in,
TextureObject<uchar4> colour_out,
TextureObject<float4> normals,
TextureObject<float> contribs) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < colour_in.width() && y < colour_in.height()) {
const float4 colour = colour_in.tex2D((int)x,(int)y);
const float4 normal = normals.tex2D((int)x,(int)y);
const float contrib = contribs.tex2D((int)x,(int)y);
if (contrib > 0.0f) {
colour_out(x,y) = make_uchar4(colour.x / contrib, colour.y / contrib, colour.z / contrib, 0);
normals(x,y) = normal / contrib;
}
}
}
void ftl::cuda::dibr(const TextureObject<int> &depth_out,
const TextureObject<uchar4> &colour_out,
const TextureObject<float4> &normal_out,
const TextureObject<float> &confidence_out,
const TextureObject<float4> &tmp_colour,
const TextureObject<int> &tmp_depth,
int numcams,
const SplatParams ¶ms,
cudaStream_t stream) {
const dim3 sgridSize((depth_out.width() + 2 - 1)/2, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 sblockSize(2*WARP_SIZE, T_PER_BLOCK);
const dim3 gridSize((depth_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
clearColourKernel<<<gridSize, blockSize, 0, stream>>>(colour_out);
ftl::cuda::clear_to_zero(confidence_out, stream);
ftl::cuda::clear_colour(tmp_colour, stream);
ftl::cuda::clear_colour(normal_out, stream);
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
//int i=3;
bool noSplatting = params.m_flags & ftl::render::kNoSplatting;
// Pass 1, gather and upsample depth maps
if (params.m_flags & ftl::render::kNoUpsampling) {
for (int i=0; i<numcams; ++i)
dibr_merge_kernel<<<gridSize, blockSize, 0, stream>>>((noSplatting) ? depth_out : tmp_depth, i, params);
} else {
for (int i=0; i<numcams; ++i)
dibr_merge_upsample_kernel<<<sgridSize, sblockSize, 0, stream>>>((noSplatting) ? depth_out : tmp_depth, i, params);
}
if (noSplatting) {
// Pass 3, accumulate all point contributions to pixels
for (int i=0; i<numcams; ++i)
dibr_attribute_contrib_kernel<<<sgridSize, sblockSize, 0, stream>>>(depth_out, tmp_colour, normal_out, confidence_out, i, params);
} else {
// Pass 2
dibr_visibility_principal_kernel2<<<sgridSize, sblockSize, 0, stream>>>(tmp_depth, depth_out, params);
// Pass 3, accumulate all point contributions to pixels
for (int i=0; i<numcams; ++i)
dibr_attribute_contrib_kernel<<<sgridSize, sblockSize, 0, stream>>>(depth_out, tmp_colour, normal_out, confidence_out, i, params);
}
// Pass 2
//dibr_visibility_principal_kernel2<<<sgridSize, sblockSize, 0, stream>>>(tmp_depth, depth_out, params);
// Pass 2, merge a depth map from each camera.
//for (int i=0; i<numcams; ++i)
// dibr_visibility_principal_kernel<<<sgridSize, sblockSize, 0, stream>>>(depth_out, i, params);
// Pass 4, normalise contributions
dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(tmp_colour, colour_out, normal_out, confidence_out);
cudaSafeCall( cudaGetLastError() );
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
void ftl::cuda::dibr_raw(const TextureObject<int> &depth_out,
int numcams, const SplatParams ¶ms, cudaStream_t stream) {
const dim3 gridSize((depth_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
//dibr_depthmap_direct_kernel<<<gridSize, blockSize, 0, stream>>>(depth_out, numcams, params);
cudaSafeCall( cudaGetLastError() );
#ifdef _DEBUG
cudaSafeCall(cudaDeviceSynchronize());
#endif
}
|
bad84fee06d586d802e237ffa3062bdfbe83324e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
#ifndef CV_PI
#define CV_PI 3.1415926535897932384626433832795f
#endif
//////////////////////////////////////////////////////////////////////////////////////
// Cart <-> Polar
namespace cv { namespace gpu { namespace mathfunc
{
struct Nothing
{
static __device__ __forceinline__ void calc(int, int, float, float, float*, size_t, float)
{
}
};
struct Magnitude
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = sqrtf(x_data * x_data + y_data * y_data);
}
};
struct MagnitudeSqr
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = x_data * x_data + y_data * y_data;
}
};
struct Atan2
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
{
float angle = atan2f(y_data, x_data);
angle += (angle < 0) * 2.0 * CV_PI;
dst[y * dst_step + x] = scale * angle;
}
};
template <typename Mag, typename Angle>
__global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step,
float* mag, size_t mag_step, float* angle, size_t angle_step, float scale, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float x_data = xptr[y * x_step + x];
float y_data = yptr[y * y_step + x];
Mag::calc(x, y, x_data, y_data, mag, mag_step, scale);
Angle::calc(x, y, x_data, y_data, angle, angle_step, scale);
}
}
struct NonEmptyMag
{
static __device__ __forceinline__ float get(const float* mag, size_t mag_step, int x, int y)
{
return mag[y * mag_step + x];
}
};
struct EmptyMag
{
static __device__ __forceinline__ float get(const float*, size_t, int, int)
{
return 1.0f;
}
};
template <typename Mag>
__global__ void polarToCart(const float* mag, size_t mag_step, const float* angle, size_t angle_step, float scale,
float* xptr, size_t x_step, float* yptr, size_t y_step, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float mag_data = Mag::get(mag, mag_step, x, y);
float angle_data = angle[y * angle_step + x];
float sin_a, cos_a;
sincosf(scale * angle_data, &sin_a, &cos_a);
xptr[y * x_step + x] = mag_data * cos_a;
yptr[y * y_step + x] = mag_data * sin_a;
}
}
template <typename Mag, typename Angle>
void cartToPolar_caller(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, const DevMem2Df& angle, bool angleInDegrees, hipStream_t stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(x.cols, threads.x);
grid.y = divUp(x.rows, threads.y);
const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;
hipLaunchKernelGGL(( cartToPolar<Mag, Angle>), dim3(grid), dim3(threads), 0, stream,
x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),
mag.data, mag.step/mag.elemSize(), angle.data, angle.step/angle.elemSize(), scale, x.cols, x.rows);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void cartToPolar_gpu(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, bool magSqr, const DevMem2Df& angle, bool angleInDegrees, hipStream_t stream)
{
typedef void (*caller_t)(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, const DevMem2Df& angle, bool angleInDegrees, hipStream_t stream);
static const caller_t callers[2][2][2] =
{
{
{
cartToPolar_caller<Magnitude, Atan2>,
cartToPolar_caller<Magnitude, Nothing>
},
{
cartToPolar_caller<MagnitudeSqr, Atan2>,
cartToPolar_caller<MagnitudeSqr, Nothing>,
}
},
{
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>
},
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>,
}
}
};
callers[mag.data == 0][magSqr][angle.data == 0](x, y, mag, angle, angleInDegrees, stream);
}
template <typename Mag>
void polarToCart_caller(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, hipStream_t stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(mag.cols, threads.x);
grid.y = divUp(mag.rows, threads.y);
const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;
hipLaunchKernelGGL(( polarToCart<Mag>), dim3(grid), dim3(threads), 0, stream, mag.data, mag.step/mag.elemSize(),
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void polarToCart_gpu(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, hipStream_t stream)
{
typedef void (*caller_t)(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, hipStream_t stream);
static const caller_t callers[2] =
{
polarToCart_caller<NonEmptyMag>,
polarToCart_caller<EmptyMag>
};
callers[mag.data == 0](mag, angle, x, y, angleInDegrees, stream);
}
}}}
| bad84fee06d586d802e237ffa3062bdfbe83324e.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
#ifndef CV_PI
#define CV_PI 3.1415926535897932384626433832795f
#endif
//////////////////////////////////////////////////////////////////////////////////////
// Cart <-> Polar
namespace cv { namespace gpu { namespace mathfunc
{
struct Nothing
{
static __device__ __forceinline__ void calc(int, int, float, float, float*, size_t, float)
{
}
};
struct Magnitude
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = sqrtf(x_data * x_data + y_data * y_data);
}
};
struct MagnitudeSqr
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = x_data * x_data + y_data * y_data;
}
};
struct Atan2
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
{
float angle = atan2f(y_data, x_data);
angle += (angle < 0) * 2.0 * CV_PI;
dst[y * dst_step + x] = scale * angle;
}
};
template <typename Mag, typename Angle>
__global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step,
float* mag, size_t mag_step, float* angle, size_t angle_step, float scale, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float x_data = xptr[y * x_step + x];
float y_data = yptr[y * y_step + x];
Mag::calc(x, y, x_data, y_data, mag, mag_step, scale);
Angle::calc(x, y, x_data, y_data, angle, angle_step, scale);
}
}
struct NonEmptyMag
{
static __device__ __forceinline__ float get(const float* mag, size_t mag_step, int x, int y)
{
return mag[y * mag_step + x];
}
};
struct EmptyMag
{
static __device__ __forceinline__ float get(const float*, size_t, int, int)
{
return 1.0f;
}
};
template <typename Mag>
__global__ void polarToCart(const float* mag, size_t mag_step, const float* angle, size_t angle_step, float scale,
float* xptr, size_t x_step, float* yptr, size_t y_step, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float mag_data = Mag::get(mag, mag_step, x, y);
float angle_data = angle[y * angle_step + x];
float sin_a, cos_a;
sincosf(scale * angle_data, &sin_a, &cos_a);
xptr[y * x_step + x] = mag_data * cos_a;
yptr[y * y_step + x] = mag_data * sin_a;
}
}
template <typename Mag, typename Angle>
void cartToPolar_caller(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, const DevMem2Df& angle, bool angleInDegrees, cudaStream_t stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(x.cols, threads.x);
grid.y = divUp(x.rows, threads.y);
const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;
cartToPolar<Mag, Angle><<<grid, threads, 0, stream>>>(
x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),
mag.data, mag.step/mag.elemSize(), angle.data, angle.step/angle.elemSize(), scale, x.cols, x.rows);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void cartToPolar_gpu(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, bool magSqr, const DevMem2Df& angle, bool angleInDegrees, cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2Df& x, const DevMem2Df& y, const DevMem2Df& mag, const DevMem2Df& angle, bool angleInDegrees, cudaStream_t stream);
static const caller_t callers[2][2][2] =
{
{
{
cartToPolar_caller<Magnitude, Atan2>,
cartToPolar_caller<Magnitude, Nothing>
},
{
cartToPolar_caller<MagnitudeSqr, Atan2>,
cartToPolar_caller<MagnitudeSqr, Nothing>,
}
},
{
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>
},
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>,
}
}
};
callers[mag.data == 0][magSqr][angle.data == 0](x, y, mag, angle, angleInDegrees, stream);
}
template <typename Mag>
void polarToCart_caller(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, cudaStream_t stream)
{
dim3 threads(16, 16, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(mag.cols, threads.x);
grid.y = divUp(mag.rows, threads.y);
const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;
polarToCart<Mag><<<grid, threads, 0, stream>>>(mag.data, mag.step/mag.elemSize(),
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void polarToCart_gpu(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2Df& mag, const DevMem2Df& angle, const DevMem2Df& x, const DevMem2Df& y, bool angleInDegrees, cudaStream_t stream);
static const caller_t callers[2] =
{
polarToCart_caller<NonEmptyMag>,
polarToCart_caller<EmptyMag>
};
callers[mag.data == 0](mag, angle, x, y, angleInDegrees, stream);
}
}}}
|
6e9d869a4b830f400ab204456389f2d152a68e4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void create_combined_escape_newline_index(char *file, long n, bool *escape_carry_index, int *newline_count_index, long *escape_index, long escape_index_size, long *newline_index) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// We want to always calculate on 64-character boundaries, such that we can put
// all bits of 64 characters into 1 long.
long normal_chars_per_thread = (n+stride-1) / stride;
long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64;
long start = index * chars_per_thread;
long end = start + chars_per_thread;
// Get the previous carry
bool carry = index == 0 ? false : escape_carry_index[index - 1];
// These are used for checking that not everything is escaped, because
// we cannot deal with that scenario since that requires depending on all
// previous carries, rather than just the previous carry.
int escape_count = 0;
int total_count = end - start;
int final_loop_iteration = end;
if (n < end) {
final_loop_iteration = n;
}
int newline_offset = newline_count_index[index];
for (long i = start; i < final_loop_iteration; i += 1) {
char value = file[i];
// If our last carry was 1, then we add it to the bit index here.
// We do it here because we are actually setting the character that
// is escaped, and not the escape character itself.
if (carry == 1) {
escape_index[i / 64] |= (1L << (i % 64));
}
if (value == '\\') {
escape_count++;
carry = carry ^ 1;
} else {
carry = 0;
}
if (value == '\n') {
newline_index[newline_offset++] = i;
}
}
// We do not expect to see a run of all backslashes
assert(escape_count != total_count);
}
| 6e9d869a4b830f400ab204456389f2d152a68e4a.cu | __global__ void create_combined_escape_newline_index(char *file, long n, bool *escape_carry_index, int *newline_count_index, long *escape_index, long escape_index_size, long *newline_index) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// We want to always calculate on 64-character boundaries, such that we can put
// all bits of 64 characters into 1 long.
long normal_chars_per_thread = (n+stride-1) / stride;
long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64;
long start = index * chars_per_thread;
long end = start + chars_per_thread;
// Get the previous carry
bool carry = index == 0 ? false : escape_carry_index[index - 1];
// These are used for checking that not everything is escaped, because
// we cannot deal with that scenario since that requires depending on all
// previous carries, rather than just the previous carry.
int escape_count = 0;
int total_count = end - start;
int final_loop_iteration = end;
if (n < end) {
final_loop_iteration = n;
}
int newline_offset = newline_count_index[index];
for (long i = start; i < final_loop_iteration; i += 1) {
char value = file[i];
// If our last carry was 1, then we add it to the bit index here.
// We do it here because we are actually setting the character that
// is escaped, and not the escape character itself.
if (carry == 1) {
escape_index[i / 64] |= (1L << (i % 64));
}
if (value == '\\') {
escape_count++;
carry = carry ^ 1;
} else {
carry = 0;
}
if (value == '\n') {
newline_index[newline_offset++] = i;
}
}
// We do not expect to see a run of all backslashes
assert(escape_count != total_count);
}
|
e51072af8a0b0c30c23e18ccf7287ccaa321371e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#define PERR(call) \
if (call) {\
fprintf(stderr, "%s:%d Error [%s] on "#call"\n", __FILE__, __LINE__,\
hipGetErrorString(hipGetLastError()));\
exit(1);\
}
#define ERRCHECK \
if (hipPeekAtLastError()) { \
fprintf(stderr, "%s:%d Error [%s]\n", __FILE__, __LINE__,\
hipGetErrorString(hipGetLastError()));\
exit(1);\
}
__global__ void
inv_kernel(float *a_i, float *c_o, int n)
{
int *p = (int *)malloc(n*sizeof(int)); // pivot array: getrfBatched needs n entries per matrix
int *info = (int *)malloc(sizeof(int));
int batch;
hipblasHandle_t hdl;
hipblasStatus_t status = hipblasCreate(&hdl);
printf("handle %d n = %d\n", status, n);
info[0] = 0;
batch = 1;
float **a = (float **)malloc(sizeof(float *));
*a = a_i;
const float **aconst = (const float **)a;
float **c = (float **)malloc(sizeof(float *));
*c = c_o;
status = hipblasSgetrfBatched(hdl, n, a, n, p, info, batch);
__syncthreads();
printf("rf %d info %d\n", status, info[0]);
status = hipblasSgetriBatched(hdl, n, aconst, n, p,
c, n, info, batch);
__syncthreads();
printf("ri %d info %d\n", status, info[0]);
hipblasDestroy(hdl);
printf("done\n");
}
static void
run_inv(float *in, float *out, int n)
{
float *a_d, *c_d;
PERR(hipMalloc(&a_d, n*n*sizeof(float)));
PERR(hipMalloc(&c_d, n*n*sizeof(float)));
PERR(hipMemcpy(a_d, in, n*n*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( inv_kernel), dim3(1), dim3(1), 0, 0, a_d, c_d, n);
hipDeviceSynchronize();
ERRCHECK;
PERR(hipMemcpy(out, c_d, n*n*sizeof(float), hipMemcpyDeviceToHost));
PERR(hipFree(a_d));
PERR(hipFree(c_d));
}
int
main(int argc, char **argv)
{
float c[9];
float a[] = {
1, 2, 3,
0, 4, 5,
1, 0, 6 };
run_inv(a, c, 3);
for (int i = 0; i < 3; i++){
for (int j = 0; j < 3; j++) printf("%f, ",c[(3*i)+j]);
printf("\n");}
return 0;
}
| e51072af8a0b0c30c23e18ccf7287ccaa321371e.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define PERR(call) \
if (call) {\
fprintf(stderr, "%s:%d Error [%s] on "#call"\n", __FILE__, __LINE__,\
cudaGetErrorString(cudaGetLastError()));\
exit(1);\
}
#define ERRCHECK \
if (cudaPeekAtLastError()) { \
fprintf(stderr, "%s:%d Error [%s]\n", __FILE__, __LINE__,\
cudaGetErrorString(cudaGetLastError()));\
exit(1);\
}
__global__ void
inv_kernel(float *a_i, float *c_o, int n)
{
int *p = (int *)malloc(n*sizeof(int)); // pivot array: getrfBatched needs n entries per matrix
int *info = (int *)malloc(sizeof(int));
int batch;
cublasHandle_t hdl;
cublasStatus_t status = cublasCreate_v2(&hdl);
printf("handle %d n = %d\n", status, n);
info[0] = 0;
batch = 1;
float **a = (float **)malloc(sizeof(float *));
*a = a_i;
const float **aconst = (const float **)a;
float **c = (float **)malloc(sizeof(float *));
*c = c_o;
status = cublasSgetrfBatched(hdl, n, a, n, p, info, batch);
__syncthreads();
printf("rf %d info %d\n", status, info[0]);
status = cublasSgetriBatched(hdl, n, aconst, n, p,
c, n, info, batch);
__syncthreads();
printf("ri %d info %d\n", status, info[0]);
cublasDestroy_v2(hdl);
printf("done\n");
}
static void
run_inv(float *in, float *out, int n)
{
float *a_d, *c_d;
PERR(cudaMalloc(&a_d, n*n*sizeof(float)));
PERR(cudaMalloc(&c_d, n*n*sizeof(float)));
PERR(cudaMemcpy(a_d, in, n*n*sizeof(float), cudaMemcpyHostToDevice));
inv_kernel<<<1, 1>>>(a_d, c_d, n);
cudaDeviceSynchronize();
ERRCHECK;
PERR(cudaMemcpy(out, c_d, n*n*sizeof(float), cudaMemcpyDeviceToHost));
PERR(cudaFree(a_d));
PERR(cudaFree(c_d));
}
int
main(int argc, char **argv)
{
float c[9];
float a[] = {
1, 2, 3,
0, 4, 5,
1, 0, 6 };
run_inv(a, c, 3);
for (int i = 0; i < 3; i++){
for (int j = 0; j < 3; j++) printf("%f, ",c[(3*i)+j]);
printf("\n");}
return 0;
}
|
0ca7bb3286ae706631bc348872eb87c766d22e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <iostream>
float cpu_array [25];
float cpu_output_array [25];
float *gpu_array_A;
float *gpu_array_B;
float *gpu_output_array;
const int mat_width = 5;
dim3 dimBlock(mat_width, mat_width);
dim3 dimGrid(1, 1);
void initCuda(int width) {
hipMalloc((void**)&gpu_array_A, width*width*sizeof(float));
hipMemcpy(gpu_array_A, cpu_array, width*width*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_array_B, width*width*sizeof(float));
hipMemcpy(gpu_array_B, cpu_array, width*width*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_output_array, width*width*sizeof(float));
}
__global__ void mat_add (float* Ad, float* Bd, float* Pd, int width) {
int index = threadIdx.y * width + threadIdx.x;
Pd[index] = Ad[index] + Bd[index];
}
__global__ void mat_sub (float* Ad, float* Bd, float* Pd, int width) {
int index = threadIdx.y * width + threadIdx.x;
Pd[index] = Ad[index] - Bd[index];
}
__global__ void mat_mult (float* Ad, float* Bd, float* Pd, int width) {
int tx = threadIdx.x;
int ty = threadIdx.y;
float Pvalue = 0;
for (int k = 0; k < width; k++) {
Pvalue += Ad[ty * width + k] * Bd[k * width + tx];
}
Pd[ty * width + tx] = Pvalue;
}
void cpu_mat_add (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
P[j * width + i] = A[j * width + i] + B[j * width + i];
}
}
}
void cpu_mat_sub (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
P[j * width + i] = A[j * width + i] - B[j * width + i];
}
}
}
/***
* CPU reference implementation of square matrix multiplication (used to check the GPU kernel).
***/
void cpu_mat_mult (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
float Psum = 0;
for (int k = 0; k < width; k++) {
Psum += A[j * width + k] * B[k * width + i];
}
P[j * width + i] = Psum;
}
}
}
/***
* Simple helper function for printing a matrix.
***/
void printMatrix (float* M, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
std::cout << M[i * width + j] << " ";
}
std::cout << std::endl;
}
}
int main(int argc, char** argv) {
for (int i = 0; i < 25; i++) {
cpu_array[i] = i;
}
initCuda(mat_width);
hipLaunchKernelGGL(( mat_add), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
hipMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), hipMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_add(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
hipLaunchKernelGGL(( mat_sub), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
hipMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), hipMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_sub(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
hipLaunchKernelGGL(( mat_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
hipMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), hipMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_mult(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
int a;
std::cin>>a;
} | 0ca7bb3286ae706631bc348872eb87c766d22e3d.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <iostream>
float cpu_array [25];
float cpu_output_array [25];
float *gpu_array_A;
float *gpu_array_B;
float *gpu_output_array;
const int mat_width = 5;
dim3 dimBlock(mat_width, mat_width);
dim3 dimGrid(1, 1);
void initCuda(int width) {
cudaMalloc((void**)&gpu_array_A, width*width*sizeof(float));
cudaMemcpy(gpu_array_A, cpu_array, width*width*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_array_B, width*width*sizeof(float));
cudaMemcpy(gpu_array_B, cpu_array, width*width*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_output_array, width*width*sizeof(float));
}
__global__ void mat_add (float* Ad, float* Bd, float* Pd, int width) {
int index = threadIdx.y * width + threadIdx.x;
Pd[index] = Ad[index] + Bd[index];
}
__global__ void mat_sub (float* Ad, float* Bd, float* Pd, int width) {
int index = threadIdx.y * width + threadIdx.x;
Pd[index] = Ad[index] - Bd[index];
}
__global__ void mat_mult (float* Ad, float* Bd, float* Pd, int width) {
int tx = threadIdx.x;
int ty = threadIdx.y;
float Pvalue = 0;
for (int k = 0; k < width; k++) {
Pvalue += Ad[ty * width + k] * Bd[k * width + tx];
}
Pd[ty * width + tx] = Pvalue;
}
void cpu_mat_add (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
P[j * width + i] = A[j * width + i] + B[j * width + i];
}
}
}
void cpu_mat_sub (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
P[j * width + i] = A[j * width + i] - B[j * width + i];
}
}
}
/***
* CPU reference implementation of square matrix multiplication (used to check the GPU kernel).
***/
void cpu_mat_mult (float* A, float* B, float* P, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
float Psum = 0;
for (int k = 0; k < width; k++) {
Psum += A[j * width + k] * B[k * width + i];
}
P[j * width + i] = Psum;
}
}
}
/***
* Simple helper function for printing a matrix.
***/
void printMatrix (float* M, int width) {
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
std::cout << M[i * width + j] << " ";
}
std::cout << std::endl;
}
}
int main(int argc, char** argv) {
for (int i = 0; i < 25; i++) {
cpu_array[i] = i;
}
initCuda(mat_width);
mat_add<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_add(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
mat_sub<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_sub(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
mat_mult<<<dimGrid, dimBlock>>>(gpu_array_A, gpu_array_B, gpu_output_array, mat_width);
cudaMemcpy(cpu_output_array, gpu_output_array, mat_width*mat_width*sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(cpu_output_array, mat_width);
cpu_mat_mult(cpu_array, cpu_array, cpu_output_array, mat_width);
printMatrix(cpu_output_array, mat_width);
int a;
std::cin>>a;
} |
381320462415981ce528d2f77c76085bc207f08c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Test for linking with CUDA's libdevice as outlined in
// http://llvm.org/docs/NVPTXUsage.html#linking-with-libdevice
//
// REQUIRES: nvptx-registered-target
//
// Prepare bitcode file to link with
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -emit-llvm-bc -o %t.bc \
// RUN: %S/Inputs/device-code.ll
//
// Make sure function in device-code gets linked in and internalized.
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -fcuda-uses-libdevice -emit-llvm \
// RUN: -disable-llvm-passes -o - %s \
// RUN: | FileCheck %s -check-prefix CHECK-IR
//
// Make sure function in device-code gets linked but is not internalized
// without -fcuda-uses-libdevice
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -emit-llvm \
// RUN: -disable-llvm-passes -o - %s \
// RUN: | FileCheck %s -check-prefix CHECK-IR-NLD
//
// Make sure NVVMReflect pass is enabled in NVPTX back-end.
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -fcuda-uses-libdevice -S -o /dev/null %s \
// RUN: -backend-option -debug-pass=Structure 2>&1 \
// RUN: | FileCheck %s -check-prefix CHECK-REFLECT
#include "Inputs/cuda.h"
__device__ float device_mul_or_add(float a, float b);
extern "C" __device__ double __nv_sin(double x);
extern "C" __device__ double __nv_exp(double x);
// CHECK-IR-LABEL: define void @_Z26should_not_be_internalizedPf(
// CHECK-PTX-LABEL: .visible .func _Z26should_not_be_internalizedPf(
__device__ void should_not_be_internalized(float *data) {}
// Make sure kernel call has not been internalized.
// CHECK-IR-LABEL: define void @_Z6kernelPfS_
// CHECK-PTX-LABEL: .visible .entry _Z6kernelPfS_(
__global__ __attribute__((used)) void kernel(float *out, float *in) {
*out = device_mul_or_add(in[0], in[1]);
*out += __nv_exp(__nv_sin(*out));
should_not_be_internalized(out);
}
// Make sure device_mul_or_add() is present in IR, is internal and
// calls __nvvm_reflect().
// CHECK-IR-LABEL: define internal float @_Z17device_mul_or_addff(
// CHECK-IR-NLD-LABEL: define float @_Z17device_mul_or_addff(
// CHECK-IR: call i32 @__nvvm_reflect
// CHECK-IR: ret float
// Verify that NVVMReflect pass is among the passes run by NVPTX back-end.
// CHECK-REFLECT: Replace occurrences of __nvvm_reflect() calls with 0/1
| 381320462415981ce528d2f77c76085bc207f08c.cu | // Test for linking with CUDA's libdevice as outlined in
// http://llvm.org/docs/NVPTXUsage.html#linking-with-libdevice
//
// REQUIRES: nvptx-registered-target
//
// Prepare bitcode file to link with
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -emit-llvm-bc -o %t.bc \
// RUN: %S/Inputs/device-code.ll
//
// Make sure function in device-code gets linked in and internalized.
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -fcuda-uses-libdevice -emit-llvm \
// RUN: -disable-llvm-passes -o - %s \
// RUN: | FileCheck %s -check-prefix CHECK-IR
//
// Make sure function in device-code gets linked but is not internalized
// without -fcuda-uses-libdevice
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -emit-llvm \
// RUN: -disable-llvm-passes -o - %s \
// RUN: | FileCheck %s -check-prefix CHECK-IR-NLD
//
// Make sure NVVMReflect pass is enabled in NVPTX back-end.
// RUN: %clang_cc1 -triple nvptx-unknown-cuda -fcuda-is-device \
// RUN: -mlink-bitcode-file %t.bc -fcuda-uses-libdevice -S -o /dev/null %s \
// RUN: -backend-option -debug-pass=Structure 2>&1 \
// RUN: | FileCheck %s -check-prefix CHECK-REFLECT
#include "Inputs/cuda.h"
__device__ float device_mul_or_add(float a, float b);
extern "C" __device__ double __nv_sin(double x);
extern "C" __device__ double __nv_exp(double x);
// CHECK-IR-LABEL: define void @_Z26should_not_be_internalizedPf(
// CHECK-PTX-LABEL: .visible .func _Z26should_not_be_internalizedPf(
__device__ void should_not_be_internalized(float *data) {}
// Make sure kernel call has not been internalized.
// CHECK-IR-LABEL: define void @_Z6kernelPfS_
// CHECK-PTX-LABEL: .visible .entry _Z6kernelPfS_(
__global__ __attribute__((used)) void kernel(float *out, float *in) {
*out = device_mul_or_add(in[0], in[1]);
*out += __nv_exp(__nv_sin(*out));
should_not_be_internalized(out);
}
// Make sure device_mul_or_add() is present in IR, is internal and
// calls __nvvm_reflect().
// CHECK-IR-LABEL: define internal float @_Z17device_mul_or_addff(
// CHECK-IR-NLD-LABEL: define float @_Z17device_mul_or_addff(
// CHECK-IR: call i32 @__nvvm_reflect
// CHECK-IR: ret float
// Verify that NVVMReflect pass is among the passes run by NVPTX back-end.
// CHECK-REFLECT: Replace occurrences of __nvvm_reflect() calls with 0/1
|
97ff81a15fce7b7211f254e33c6f307ca043ec97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
template <typename Dtype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if(COUNT_INCLUDE_PAD)
top_data[index] = aveval / pool_size;
else
top_data[index] = aveval / ((hend - hstart) * (wend - wstart));
}
}
void THNN_CudaSpatialAveragePooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad)
{
THCUNN_assertSameGPU(state, 2, input, output);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
THArgCheck(nInputCols >= kW - 2*padW && nInputRows >= kH - 2*padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolForward<float, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
else
hipLaunchKernelGGL(( AvePoolForward<float, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_free(state, input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialAveragePooling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
template <typename Dtype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if(COUNT_INCLUDE_PAD)
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
else
gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart));
}
}
bottom_diff[index] = gradient;
}
}
void THNN_CudaSpatialAveragePooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolBackward<float, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCudaTensor_data(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
else
hipLaunchKernelGGL(( AvePoolBackward<float, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCudaTensor_data(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialAveragePooling.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
| 97ff81a15fce7b7211f254e33c6f307ca043ec97.cu | #include "THCUNN.h"
#include "common.h"
template <typename Dtype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if(COUNT_INCLUDE_PAD)
top_data[index] = aveval / pool_size;
else
top_data[index] = aveval / ((hend - hstart) * (wend - wstart));
}
}
void THNN_CudaSpatialAveragePooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad)
{
THCUNN_assertSameGPU(state, 2, input, output);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
THArgCheck(nInputCols >= kW - 2*padW && nInputRows >= kH - 2*padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
if(count_include_pad)
AvePoolForward<float, true>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
else
AvePoolForward<float, false>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_free(state, input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialAveragePooling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
template <typename Dtype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if(COUNT_INCLUDE_PAD)
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
else
gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart));
}
}
bottom_diff[index] = gradient;
}
}
void THNN_CudaSpatialAveragePooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
if(count_include_pad)
AvePoolBackward<float, true>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCudaTensor_data(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
else
AvePoolBackward<float, false>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCudaTensor_data(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialAveragePooling.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
|
9b203ea87db2214ecda5c29dea5adce49347f8f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hiprand/hiprand.h>
...
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
} | 9b203ea87db2214ecda5c29dea5adce49347f8f2.cu | #include <curand.h>
...
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
// Create a pseudo-random number generator
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
// Fill the array with random numbers on the device
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
} |
f29b4a9a7b76482391bad3af5d083fd15b672baa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/rms_norm_op.h"
#include <vector>
#include <thrust/tuple.h>
#include "c10/hip/HIPMathCompat.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void RowwiseRMSCUDAKernel(int64_t N, T eps, const T* X, T* rrms) {
__shared__ typename BlockReduce<T>::TempStorage rms_storage;
const int64_t i = blockIdx.x;
T sum = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum += X[index] * X[index];
}
sum = BlockReduce<T>(rms_storage).Sum(sum);
if (threadIdx.x == 0) {
rrms[i] =
c10::hip::compat::rsqrt(sum / static_cast<T>(N) + static_cast<T>(eps));
}
}
template <typename T>
__global__ void RMSNormForwardCUDAKernel(
int64_t N,
const T* X,
const T* gamma,
const T* beta,
const T* rrms,
T* Y) {
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
Y[index] = rrms[i] * X[index] * gamma[j] + beta[j];
}
}
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* rrms,
T* c2) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
const int64_t i = blockIdx.x;
T ds = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int index = i * N + j;
ds += dY[index] * X[index] * gamma[j];
}
ds = BlockReduce<T>(ds_storage).Sum(ds);
if (threadIdx.x == 0) {
c2[i] = -ds * math::utils::Cube<T>(rrms[i]) / static_cast<T>(N);
}
}
template <typename T>
__global__ void RMSNormBackwardCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* c1,
const T* c2,
T* dX) {
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
dX[index] = c1[i] * dY[index] * gamma[j] + c2[i] * X[index];
}
}
// Assume the batch size will not be very large, direct implementation is the
// most efficient one.
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* rrms,
T* dg,
T* db) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < N) {
T sum1 = 0;
T sum2 = 0;
for (int64_t i = 0; i < M; ++i) {
const int64_t index = i * N + j;
sum1 += dY[index] * X[index] * rrms[i];
sum2 += dY[index];
}
dg[j] = sum1;
db[j] = sum2;
}
}
} // namespace
template <>
template <typename T>
bool RMSNormOp<CUDAContext>::DoRunWithType() {
const auto& X = Input(0);
const auto& gamma = Input(1);
const auto& beta = Input(2);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
CAFFE_ENFORCE_GE(X.dim(), 2, "RMSNorm requires input dim >= 2.");
const int canonical_axis = X.canonical_axis_index(axis_);
const std::vector<int64_t> rms_dims(
X.sizes().cbegin(), X.sizes().cbegin() + canonical_axis);
auto* rrms = Output(1, rms_dims, at::dtype<T>());
const int64_t M = X.size_to_dim(canonical_axis);
const int64_t N = X.size_from_dim(canonical_axis);
CAFFE_ENFORCE_EQ(gamma.numel(), N);
CAFFE_ENFORCE_EQ(beta.numel(), N);
const T* X_data = X.template data<T>();
const T* gamma_data = gamma.template data<T>();
const T* beta_data = beta.template data<T>();
T* Y_data = Y->template data<T>();
T* rrms_data = rrms->template data<T>();
if (M > 0) {
hipLaunchKernelGGL(( RowwiseRMSCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, static_cast<T>(eps_), X_data, rrms_data);
hipLaunchKernelGGL(( RMSNormForwardCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, X_data, gamma_data, beta_data, rrms_data, Y_data);
}
return true;
}
template <>
template <typename T>
void RMSNormGradientOp<CUDAContext>::RMSNormBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* rrms,
T* dX) {
ReinitializeTensor(
&c2_, {M}, at::dtype<T>().device(CUDAContext::GetDeviceType()));
T* c2_data = c2_.mutable_data<T>();
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, dY, X, gamma, rrms, c2_data);
hipLaunchKernelGGL(( RMSNormBackwardCUDAKernel<T>)
, dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
N, dY, X, gamma, rrms, c2_data, dX);
}
template <>
template <typename T>
void RMSNormGradientOp<CUDAContext>::GammaBetaBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* rrms,
T* dgamma,
T* dbeta) {
const int64_t B = math::DivUp<int64_t>(N, CAFFE_CUDA_NUM_THREADS);
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T>)
, dim3(B), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
M, N, dY, X, rrms, dgamma, dbeta);
}
REGISTER_CUDA_OPERATOR(RMSNorm, RMSNormOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(RMSNormGradient, RMSNormGradientOp<CUDAContext>);
} // namespace caffe2
| f29b4a9a7b76482391bad3af5d083fd15b672baa.cu | #include "caffe2/operators/rms_norm_op.h"
#include <vector>
#include <thrust/tuple.h>
#include "c10/cuda/CUDAMathCompat.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/reduce.cuh"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void RowwiseRMSCUDAKernel(int64_t N, T eps, const T* X, T* rrms) {
__shared__ typename BlockReduce<T>::TempStorage rms_storage;
const int64_t i = blockIdx.x;
T sum = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
sum += X[index] * X[index];
}
sum = BlockReduce<T>(rms_storage).Sum(sum);
if (threadIdx.x == 0) {
rrms[i] =
c10::cuda::compat::rsqrt(sum / static_cast<T>(N) + static_cast<T>(eps));
}
}
template <typename T>
__global__ void RMSNormForwardCUDAKernel(
int64_t N,
const T* X,
const T* gamma,
const T* beta,
const T* rrms,
T* Y) {
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
Y[index] = rrms[i] * X[index] * gamma[j] + beta[j];
}
}
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* rrms,
T* c2) {
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
const int64_t i = blockIdx.x;
T ds = 0;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int index = i * N + j;
ds += dY[index] * X[index] * gamma[j];
}
ds = BlockReduce<T>(ds_storage).Sum(ds);
if (threadIdx.x == 0) {
c2[i] = -ds * math::utils::Cube<T>(rrms[i]) / static_cast<T>(N);
}
}
template <typename T>
__global__ void RMSNormBackwardCUDAKernel(
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* c1,
const T* c2,
T* dX) {
const int64_t i = blockIdx.x;
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
dX[index] = c1[i] * dY[index] * gamma[j] + c2[i] * X[index];
}
}
// Assume the batch size will not be very large, direct implementation is the
// most efficient one.
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* rrms,
T* dg,
T* db) {
const int64_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < N) {
T sum1 = 0;
T sum2 = 0;
for (int64_t i = 0; i < M; ++i) {
const int64_t index = i * N + j;
sum1 += dY[index] * X[index] * rrms[i];
sum2 += dY[index];
}
dg[j] = sum1;
db[j] = sum2;
}
}
} // namespace
template <>
template <typename T>
bool RMSNormOp<CUDAContext>::DoRunWithType() {
const auto& X = Input(0);
const auto& gamma = Input(1);
const auto& beta = Input(2);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
CAFFE_ENFORCE_GE(X.dim(), 2, "RMSNorm requires input dim >= 2.");
const int canonical_axis = X.canonical_axis_index(axis_);
const std::vector<int64_t> rms_dims(
X.sizes().cbegin(), X.sizes().cbegin() + canonical_axis);
auto* rrms = Output(1, rms_dims, at::dtype<T>());
const int64_t M = X.size_to_dim(canonical_axis);
const int64_t N = X.size_from_dim(canonical_axis);
CAFFE_ENFORCE_EQ(gamma.numel(), N);
CAFFE_ENFORCE_EQ(beta.numel(), N);
const T* X_data = X.template data<T>();
const T* gamma_data = gamma.template data<T>();
const T* beta_data = beta.template data<T>();
T* Y_data = Y->template data<T>();
T* rrms_data = rrms->template data<T>();
if (M > 0) {
RowwiseRMSCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, static_cast<T>(eps_), X_data, rrms_data);
RMSNormForwardCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, X_data, gamma_data, beta_data, rrms_data, Y_data);
}
return true;
}
template <>
template <typename T>
void RMSNormGradientOp<CUDAContext>::RMSNormBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* rrms,
T* dX) {
ReinitializeTensor(
&c2_, {M}, at::dtype<T>().device(CUDAContext::GetDeviceType()));
T* c2_data = c2_.mutable_data<T>();
ComputeInternalGradientsCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, dY, X, gamma, rrms, c2_data);
RMSNormBackwardCUDAKernel<T>
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
N, dY, X, gamma, rrms, c2_data, dX);
}
template <>
template <typename T>
void RMSNormGradientOp<CUDAContext>::GammaBetaBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* rrms,
T* dgamma,
T* dbeta) {
const int64_t B = math::DivUp<int64_t>(N, CAFFE_CUDA_NUM_THREADS);
GammaBetaBackwardCUDAKernel<T>
<<<B, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
M, N, dY, X, rrms, dgamma, dbeta);
}
REGISTER_CUDA_OPERATOR(RMSNorm, RMSNormOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(RMSNormGradient, RMSNormGradientOp<CUDAContext>);
} // namespace caffe2
|
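// Host reference for the forward path above (RowwiseRMSCUDAKernel followed by
// RMSNormForwardCUDAKernel); a sketch for validating results, with an assumed
// row-major M x N layout and an illustrative function name.
#include <cmath>
void rms_norm_forward_ref(int M, int N, float eps, const float* X,
const float* gamma, const float* beta, float* Y) {
for (int i = 0; i < M; ++i) {
float sum_sq = 0.0f;
for (int j = 0; j < N; ++j) sum_sq += X[i * N + j] * X[i * N + j];
const float rrms = 1.0f / std::sqrt(sum_sq / N + eps); // rsqrt(mean(x^2) + eps)
for (int j = 0; j < N; ++j)
Y[i * N + j] = rrms * X[i * N + j] * gamma[j] + beta[j];
}
}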
2d1be139edb60ba02936eefee730c81667a525e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0)
#define THREADS_PER_BLOCK 1024
__constant__ __device__ int IE_d;
__constant__ __device__ int JE_d;
__constant__ __device__ float cb_d;
__constant__ __device__ int is_d;
__constant__ __device__ float pi_d;
__constant__ __device__ float freq_d;
__constant__ __device__ float dt_d;
__constant__ __device__ float db_d;
__global__ void ezCalc ( float *ez, float *hx, float *hy ) {
int i, j = blockIdx.x;
for (i = threadIdx.x; i < IE_d; i += blockDim.x) {
if (j == 0) { // at x=0
if (i == 0 || i == IE_d - 1) // at x=0,y=0
ez[j * IE_d + i] = 0.0;
else
ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]);
} else {
if (i == 0 || i == IE_d - 1)
ez[j * IE_d + i] = 0.0;
else
ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]);
}
}
}
__global__ void ezCalc2 ( float *ez , int n ) {
int j;
for (j = threadIdx.x; j < JE_d; j += blockDim.x) {
ez[j * IE_d + is_d] = cos(2 * pi_d * freq_d * n * dt_d);
}
}
__global__ void hCalc ( float *ez, float *hx, float *hy ) {
int i, j = blockIdx.x;
for (i = threadIdx.x; i < IE_d; i += blockDim.x) {
if (j + 1 == JE_d)
hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[i]);
else
hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[(j + 1) * IE_d + i]);
if (i == IE_d - 1)
hy[j * IE_d + i] = hy[j * IE_d + i] + db_d * (0 - ez[j * IE_d + i]);
else
hy[j * IE_d + i] = hy[j * IE_d + i] + db_d * (ez[j * IE_d + (i + 1)] - ez[j * IE_d + i]);
}
}
int main(int argc, char * argv[]) {
int IE, JE, nsteps;
int i, j, n, is;
float pi = 3.141592653589793238462643;
float * ez, * hx, * hy;
float * ez_d, *hx_d, * hy_d;
float dx, dt, epsz, mu, courant, cb, db, c, freq;
int size;
struct timespec Begin, Step0, Step1, Step2, Step3, End;
FILE * fp;
if (argc != 4) {
printf("Invalid arguments... please type:\n");
printf(" %s IE JE steps\n", argv[0]);
exit(0);
}
IE = atoi(argv[1]);
JE = atoi(argv[2]);
nsteps = atoi(argv[3]);
printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps);
hipMemcpyToSymbol(pi_d, &pi, sizeof(float), 0, hipMemcpyHostToDevice);
is = 10;
hipMemcpyToSymbol(is_d, &is, sizeof(int), 0, hipMemcpyHostToDevice);
epsz = 8.854e-12;
mu = 4.0 * pi * 1.0e-7;
c = 3.0e8;
courant = 0.5;
dx = 0.001;
dt = (courant * dx) / (sqrt(2) * c);
hipMemcpyToSymbol(dt_d, &dt, sizeof(float), 0, hipMemcpyHostToDevice);
cb = dt / (epsz * dx);
db = dt / (mu * dx);
hipMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(db_d, &db, sizeof(float), 0, hipMemcpyHostToDevice);
printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db);
size = IE * JE;
ez = (float * ) calloc(size, sizeof(float));
hx = (float * ) calloc(size, sizeof(float));
hy = (float * ) calloc(size, sizeof(float));
hipMalloc( (void **) &ez_d, size * sizeof(float));
hipMalloc( (void **) &hx_d, size * sizeof(float));
hipMalloc( (void **) &hy_d, size * sizeof(float));
freq = 50e9;
hipMemcpyToSymbol(freq_d, &freq, sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(JE_d, &JE, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(IE_d, &IE, sizeof(int), 0, hipMemcpyHostToDevice);
if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) {
perror("Error in gettime");
exit(1);
}
// Transfer initial matrices to gpu
hipMemcpy( ez_d, ez, size * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( hx_d, hx, size * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( hy_d, hy, size * sizeof(float), hipMemcpyHostToDevice );
for (n = 0; n < nsteps; n++) { // TIME
if (clock_gettime(CLOCK_REALTIME, &Step0) == -1) {
perror("Error in gettime");
exit(1);
}
//Calculate the Ez field
hipLaunchKernelGGL(( ezCalc), dim3(JE), dim3(THREADS_PER_BLOCK), 0, 0, ez_d, hx_d, hy_d );
clock_gettime(CLOCK_REALTIME, &Step1);
//Ez field generator (line)
hipLaunchKernelGGL(( ezCalc2), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, ez_d , n );
clock_gettime(CLOCK_REALTIME, &Step2);
//Calculate the H field
hipLaunchKernelGGL(( hCalc), dim3(JE), dim3(THREADS_PER_BLOCK), 0, 0, ez_d, hx_d, hy_d );
if (clock_gettime(CLOCK_REALTIME, &Step3) == -1) {
perror("Error in gettime");
exit(1);
}
}
// Retrieve matrices from gpu
hipMemcpy( ez, ez_d, size * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( hx, hx_d, size * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( hy, hy_d, size * sizeof(float), hipMemcpyDeviceToHost );
if (clock_gettime(CLOCK_REALTIME, &End) == -1) {
perror("Error in gettime");
exit(1);
}
printf("\n\n====Total time: %f\n", GET_TIME(Begin, End));
// write output to file
fp = fopen("output_gpu_v4.txt", "w");
fprintf(fp, "==================== Ez MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", ez[i]);
}
fprintf(fp, "==================== Hx MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", hx[i]);
}
fprintf(fp, "==================== Hy MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", hy[i]);
}
free(ez);
free(hy);
free(hx);
hipFree( ez_d );
hipFree( hx_d );
hipFree( hy_d );
return 0;
}
| 2d1be139edb60ba02936eefee730c81667a525e2.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define GET_TIME(X, Y) (((Y).tv_sec - (X).tv_sec) + ((Y).tv_nsec - (X).tv_nsec) / 1000000000.0)
#define THREADS_PER_BLOCK 1024
__constant__ __device__ int IE_d;
__constant__ __device__ int JE_d;
__constant__ __device__ float cb_d;
__constant__ __device__ int is_d;
__constant__ __device__ float pi_d;
__constant__ __device__ float freq_d;
__constant__ __device__ float dt_d;
__constant__ __device__ float db_d;
__global__ void ezCalc ( float *ez, float *hx, float *hy ) {
int i, j = blockIdx.x;
for (i = threadIdx.x; i < IE_d; i += blockDim.x) {
if (j == 0) { // at x=0
if (i == 0 || i == IE_d - 1) // at x=0,y=0
ez[j * IE_d + i] = 0.0;
else
ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1 + JE_d) * IE_d + i] - hx[j * IE_d + i]);
} else {
if (i == 0 || i == IE_d - 1)
ez[j * IE_d + i] = 0.0;
else
ez[j * IE_d + i] = ez[j * IE_d + i] + cb_d * (hy[j * IE_d + i] - hy[j * IE_d + (i - 1)] + hx[(j - 1) * IE_d + i] - hx[j * IE_d + i]);
}
}
}
__global__ void ezCalc2 ( float *ez , int n ) {
int j;
for (j = threadIdx.x; j < JE_d; j += blockDim.x) {
ez[j * IE_d + is_d] = cos(2 * pi_d * freq_d * n * dt_d);
}
}
__global__ void hCalc ( float *ez, float *hx, float *hy ) {
int i, j = blockIdx.x;
for (i = threadIdx.x; i < IE_d; i += blockDim.x) {
if (j + 1 == JE_d)
hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[i]);
else
hx[j * IE_d + i] = hx[j * IE_d + i] + db_d * (ez[j * IE_d + i] - ez[(j + 1) * IE_d + i]);
if (i == IE_d - 1)
hy[j * IE_d + i] = hy[j * IE_d + i] + db_d * (0 - ez[j * IE_d + i]);
else
hy[j * IE_d + i] = hy[j * IE_d + i] + db_d * (ez[j * IE_d + (i + 1)] - ez[j * IE_d + i]);
}
}
int main(int argc, char * argv[]) {
int IE, JE, nsteps;
int i, j, n, is;
float pi = 3.141592653589793238462643;
float * ez, * hx, * hy;
float * ez_d, *hx_d, * hy_d;
float dx, dt, epsz, mu, courant, cb, db, c, freq;
int size;
struct timespec Begin, Step0, Step1, Step2, Step3, End;
FILE * fp;
if (argc != 4) {
printf("Invalid arguments... please type:\n");
printf(" %s IE JE steps\n", argv[0]);
exit(0);
}
IE = atoi(argv[1]);
JE = atoi(argv[2]);
nsteps = atoi(argv[3]);
printf("Running 2D FDTD algorithm with matrix of size %d x %d (%d steps)\n", IE, JE, nsteps);
cudaMemcpyToSymbol(pi_d, &pi, sizeof(float), 0, cudaMemcpyHostToDevice);
is = 10;
cudaMemcpyToSymbol(is_d, &is, sizeof(int), 0, cudaMemcpyHostToDevice);
epsz = 8.854e-12;
mu = 4.0 * pi * 1.0e-7;
c = 3.0e8;
courant = 0.5;
dx = 0.001;
dt = (courant * dx) / (sqrt(2) * c);
cudaMemcpyToSymbol(dt_d, &dt, sizeof(float), 0, cudaMemcpyHostToDevice);
cb = dt / (epsz * dx);
db = dt / (mu * dx);
cudaMemcpyToSymbol(cb_d, &cb, sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(db_d, &db, sizeof(float), 0, cudaMemcpyHostToDevice);
printf("Coefficients are: dt=%g cb=%g db=%g\n", dt, cb, db);
size = IE * JE;
ez = (float * ) calloc(size, sizeof(float));
hx = (float * ) calloc(size, sizeof(float));
hy = (float * ) calloc(size, sizeof(float));
cudaMalloc( (void **) &ez_d, size * sizeof(float));
cudaMalloc( (void **) &hx_d, size * sizeof(float));
cudaMalloc( (void **) &hy_d, size * sizeof(float));
freq = 50e9;
cudaMemcpyToSymbol(freq_d, &freq, sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(JE_d, &JE, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(IE_d, &IE, sizeof(int), 0, cudaMemcpyHostToDevice);
if (clock_gettime(CLOCK_REALTIME, &Begin) == -1) {
perror("Error in gettime");
exit(1);
}
// Transfer initial matrices to gpu
cudaMemcpy( ez_d, ez, size * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( hx_d, hx, size * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( hy_d, hy, size * sizeof(float), cudaMemcpyHostToDevice );
for (n = 0; n < nsteps; n++) { // TIME
if (clock_gettime(CLOCK_REALTIME, &Step0) == -1) {
perror("Error in gettime");
exit(1);
}
//Calculate the Ez field
ezCalc<<<JE, THREADS_PER_BLOCK>>>( ez_d, hx_d, hy_d );
clock_gettime(CLOCK_REALTIME, &Step1);
//Ez field generator (line)
ezCalc2<<<1, THREADS_PER_BLOCK>>>( ez_d , n );
clock_gettime(CLOCK_REALTIME, &Step2);
//Calculate the H field
hCalc<<<JE, THREADS_PER_BLOCK>>>( ez_d, hx_d, hy_d );
if (clock_gettime(CLOCK_REALTIME, &Step3) == -1) {
perror("Error in gettime");
exit(1);
}
}
// Retrieve matrices from gpu
cudaMemcpy( ez, ez_d, size * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( hx, hx_d, size * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( hy, hy_d, size * sizeof(float), cudaMemcpyDeviceToHost );
if (clock_gettime(CLOCK_REALTIME, &End) == -1) {
perror("Error in gettime");
exit(1);
}
printf("\n\n====Total time: %f\n", GET_TIME(Begin, End));
// write output to file
fp = fopen("output_gpu_v4.txt", "w");
fprintf(fp, "==================== Ez MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", ez[i]);
}
fprintf(fp, "==================== Hx MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", hx[i]);
}
fprintf(fp, "==================== Hy MATRIX ========================\n");
for (i = 0, j = 0;
(i < IE * JE) && (i < 1000); i++, j++) {
if (j == 8) {
fprintf(fp, "\n");
j = 0;
}
fprintf(fp, "%8f ", hy[i]);
}
free(ez);
free(hy);
free(hx);
cudaFree( ez_d );
cudaFree( hx_d );
cudaFree( hy_d );
return 0;
}
|
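// Note on the time step chosen in main() above: dt = courant * dx / (sqrt(2) * c)
// is the 2D FDTD Courant (CFL) limit dx / (c * sqrt(2)) scaled by the safety
// factor courant = 0.5. A small host-side sanity check (a sketch):
#include <assert.h>
#include <math.h>
static void check_cfl_2d(float dx, float dt, float c) {
assert(dt <= dx / (c * sqrtf(2.0f))); // holds for any courant factor <= 1
}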
569a768b5f7d0ff8b3f0940477bca1bbf9c2d8af.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include <iomanip>
#include "caffe/layers/beam_search_layer.hpp"
namespace caffe {
template <typename Dtype>
void BeamSearchLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int batch_size = bottom[0]->shape(0);
const int nthreads = this->input_sequence_->shape(0);
// Copy bottom input into the beam search net, duplicating for size of beam search
this->copy_bottom_inputs_gpu(bottom);
// Zero sequence, input and recurrent connections for first pass
this->clear_recurrent_inputs_gpu();
for (int timestep = 0; timestep < this->sequence_length_; ++timestep) {
this->net_->Forward();
this->sort_beam_expansions_gpu();
this->sum_expansion_scores_gpu(top, timestep);
// Find the overall beam_size best sequences for each input
this->sort_scores_gpu();
// Save outputs
this->generate_output(top, timestep);
/*
hipDeviceSynchronize();
LOG(INFO) << "First item beams at t=" << timestep;
const Dtype* sequence_output = top[0]->cpu_data();
const Dtype* score_output = top[1]->cpu_data();
for (int l=0; l<this->beam_size_; ++l) {
std::ostringstream os;
for (int k=0; k<this->sequence_length_; ++k) {
os << std::setfill(' ') << std::setw(4) << sequence_output[l*this->sequence_length_+k] << " ";
}
os << std::setfill(' ') << std::setw(4) << score_output[l];
LOG(INFO) << os.str();
}*/
bool exiting = (timestep == this->sequence_length_-1);
if (!exiting) {
//Check for early exit
hipDeviceSynchronize();
exiting = true;
const Dtype* sequence_output = top[0]->cpu_data();
for (int idx = 0; idx < nthreads; ++idx) {
if (sequence_output[idx*this->sequence_length_+timestep] != Dtype(this->end_of_sequence_)){
exiting = false;
}
}
}
if (exiting){
break;
}
this->copy_back_recurrent_inputs_gpu(timestep);
}
}
INSTANTIATE_LAYER_GPU_FORWARD(BeamSearchLayer);
} // namespace caffe
| 569a768b5f7d0ff8b3f0940477bca1bbf9c2d8af.cu | #include <algorithm>
#include <vector>
#include <iomanip>
#include "caffe/layers/beam_search_layer.hpp"
namespace caffe {
template <typename Dtype>
void BeamSearchLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int batch_size = bottom[0]->shape(0);
const int nthreads = this->input_sequence_->shape(0);
// Copy bottom input into the beam search net, duplicating for size of beam search
this->copy_bottom_inputs_gpu(bottom);
// Zero sequence, input and recurrent connections for first pass
this->clear_recurrent_inputs_gpu();
for (int timestep = 0; timestep < this->sequence_length_; ++timestep) {
this->net_->Forward();
this->sort_beam_expansions_gpu();
this->sum_expansion_scores_gpu(top, timestep);
// Find the overall beam_size best sequences for each input
this->sort_scores_gpu();
// Save outputs
this->generate_output(top, timestep);
/*
cudaDeviceSynchronize();
LOG(INFO) << "First item beams at t=" << timestep;
const Dtype* sequence_output = top[0]->cpu_data();
const Dtype* score_output = top[1]->cpu_data();
for (int l=0; l<this->beam_size_; ++l) {
std::ostringstream os;
for (int k=0; k<this->sequence_length_; ++k) {
os << std::setfill(' ') << std::setw(4) << sequence_output[l*this->sequence_length_+k] << " ";
}
os << std::setfill(' ') << std::setw(4) << score_output[l];
LOG(INFO) << os.str();
}*/
bool exiting = (timestep == this->sequence_length_-1);
if (!exiting) {
//Check for early exit
cudaDeviceSynchronize();
exiting = true;
const Dtype* sequence_output = top[0]->cpu_data();
for (int idx = 0; idx < nthreads; ++idx) {
if (sequence_output[idx*this->sequence_length_+timestep] != Dtype(this->end_of_sequence_)){
exiting = false;
}
}
}
if (exiting){
break;
}
this->copy_back_recurrent_inputs_gpu(timestep);
}
}
INSTANTIATE_LAYER_GPU_FORWARD(BeamSearchLayer);
} // namespace caffe
|
db1c913b668c40536959090a9ba5803bc6d5f751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
/*
* Description:
* this function finds the max along the innermost dimension
* Nd input, (N-1)d output, (N-1)d argmax
*/
__global__ void max_output(float *input, float *output, float *indices,
long nrows, long ncols)
{
// output offset:
long o = threadIdx.x + blockDim.x * blockIdx.x;
if (o >= nrows) return;
// input offset:
long i = o * ncols;
// move pointers
input = input + i;
// compute max:
float max = input[0];
long argmax = 0;
long ii;
for (ii=1; ii<ncols; ii++) {
float val = input[ii];
if (val > max) {
max = val;
argmax = ii;
}
}
// store
output[o] = max;
indices[o] = argmax+1;
}
__global__ void max_gradInput(float *input, float *output, float *indices,
long nrows, long ncols)
{
// output offset:
long o = threadIdx.x + blockDim.x * blockIdx.x;
if (o >= nrows) return;
// input offset:
long i = o * ncols;
// bprop max gradient:
long idx = indices[o]-1;
input[i+idx] = output[o];
}
static int cunn_Max_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int dimension = luaT_getfieldcheckint(L, 1, "dimension")-1;
THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
luaL_argcheck(L, dimension >= 0 && dimension < input->nDimension, 2, "dimension out of range");
luaL_argcheck(L, dimension == input->nDimension-1, 2, "only supported dimension is innermost (CUDA kernel only)");
input = THCudaTensor_newContiguous(state, input);
THLongStorage *dim = THLongStorage_newWithSize(input->nDimension);
long i;
for(i = 0; i < input->nDimension; i++)
dim->data[i] = input->size[i];
dim->data[dimension] = 1;
THCudaTensor_resize(state, output, dim, NULL);
THCudaTensor_resize(state, indices, dim, NULL);
THLongStorage_free(dim);
float *input_data = THCudaTensor_data(state, input);
float *output_data = THCudaTensor_data(state, output);
float *indices_data = THCudaTensor_data(state, indices);
long nrows = THCudaTensor_nElement(state, output);
long ncols = input->size[dimension];
// cuda blocks & threads:
long nthreads = 256;
long nblocks = ceil((float)nrows / nthreads);
dim3 blocks(nblocks);
dim3 threads(nthreads);
// kernel:
hipLaunchKernelGGL(( max_output) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data, indices_data, nrows, ncols);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Max.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
// final cut:
THCudaTensor_free(state, input);
THCudaTensor_select(state, output, NULL, dimension, 0);
return 1;
}
static int cunn_Max_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor");
int dimension = luaT_getfieldcheckint(L, 1, "dimension")-1;
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
float *gradInput_data = THCudaTensor_data(state, gradInput);
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *indices_data = THCudaTensor_data(state, indices);
long nrows = THCudaTensor_nElement(state, gradOutput);
long ncols = gradInput->size[dimension];
// cuda blocks & threads:
long nthreads = 256;
long nblocks = ceil((float)nrows / nthreads);
dim3 blocks(nblocks);
dim3 threads(nthreads);
// kernel:
hipLaunchKernelGGL(( max_gradInput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, indices_data, nrows, ncols);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in Max.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_Max__ [] = {
{"Max_updateOutput", cunn_Max_updateOutput},
{"Max_updateGradInput", cunn_Max_updateGradInput},
{NULL, NULL}
};
static void cunn_Max_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_Max__, "nn");
lua_pop(L,1);
}
| db1c913b668c40536959090a9ba5803bc6d5f751.cu | #include "utils.h"
/*
* Description:
* this function finds the max along the innermost dimension
* Nd input, (N-1)d output, (N-1)d argmax
*/
__global__ void max_output(float *input, float *output, float *indices,
long nrows, long ncols)
{
// output offset:
long o = threadIdx.x + blockDim.x * blockIdx.x;
if (o >= nrows) return;
// input offset:
long i = o * ncols;
// move pointers
input = input + i;
// compute max:
float max = input[0];
long argmax = 0;
long ii;
for (ii=1; ii<ncols; ii++) {
float val = input[ii];
if (val > max) {
max = val;
argmax = ii;
}
}
// store
output[o] = max;
indices[o] = argmax+1;
}
__global__ void max_gradInput(float *input, float *output, float *indices,
long nrows, long ncols)
{
// output offset:
long o = threadIdx.x + blockDim.x * blockIdx.x;
if (o >= nrows) return;
// input offset:
long i = o * ncols;
// bprop max gradient:
long idx = indices[o]-1;
input[i+idx] = output[o];
}
static int cunn_Max_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
int dimension = luaT_getfieldcheckint(L, 1, "dimension")-1;
THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
luaL_argcheck(L, dimension >= 0 && dimension < input->nDimension, 2, "dimension out of range");
luaL_argcheck(L, dimension == input->nDimension-1, 2, "only supported dimension is innermost (CUDA kernel only)");
input = THCudaTensor_newContiguous(state, input);
THLongStorage *dim = THLongStorage_newWithSize(input->nDimension);
long i;
for(i = 0; i < input->nDimension; i++)
dim->data[i] = input->size[i];
dim->data[dimension] = 1;
THCudaTensor_resize(state, output, dim, NULL);
THCudaTensor_resize(state, indices, dim, NULL);
THLongStorage_free(dim);
float *input_data = THCudaTensor_data(state, input);
float *output_data = THCudaTensor_data(state, output);
float *indices_data = THCudaTensor_data(state, indices);
long nrows = THCudaTensor_nElement(state, output);
long ncols = input->size[dimension];
// cuda blocks & threads:
long nthreads = 256;
long nblocks = ceil((float)nrows / nthreads);
dim3 blocks(nblocks);
dim3 threads(nthreads);
// kernel:
max_output <<<blocks, threads>>> (input_data, output_data, indices_data, nrows, ncols);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Max.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
// final cut:
THCudaTensor_free(state, input);
THCudaTensor_select(state, output, NULL, dimension, 0);
return 1;
}
static int cunn_Max_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor");
int dimension = luaT_getfieldcheckint(L, 1, "dimension")-1;
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
float *gradInput_data = THCudaTensor_data(state, gradInput);
float *gradOutput_data = THCudaTensor_data(state, gradOutput);
float *indices_data = THCudaTensor_data(state, indices);
long nrows = THCudaTensor_nElement(state, gradOutput);
long ncols = gradInput->size[dimension];
// cuda blocks & threads:
long nthreads = 256;
long nblocks = ceil((float)nrows / nthreads);
dim3 blocks(nblocks);
dim3 threads(nthreads);
// kernel:
max_gradInput <<<blocks, threads>>> (gradInput_data, gradOutput_data, indices_data, nrows, ncols);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in Max.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_Max__ [] = {
{"Max_updateOutput", cunn_Max_updateOutput},
{"Max_updateGradInput", cunn_Max_updateGradInput},
{NULL, NULL}
};
static void cunn_Max_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_Max__, "nn");
lua_pop(L,1);
}
|
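// Host reference for the max_output kernel above; a sketch for checking the
// kernel on small inputs (function name is illustrative). It reproduces the
// 1-based argmax convention used for the indices tensor.
void max_output_ref(const float* input, float* output, float* indices,
long nrows, long ncols) {
for (long o = 0; o < nrows; ++o) {
const float* row = input + o * ncols;
float max = row[0];
long argmax = 0;
for (long ii = 1; ii < ncols; ++ii) {
if (row[ii] > max) { max = row[ii]; argmax = ii; }
}
output[o] = max;
indices[o] = (float)(argmax + 1); // Lua tensors are 1-indexed
}
}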
6472e5020eb74189d05764870dfb2719a16b2765.hip | // !!! This is a file automatically generated by hipify!!!
/*
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plate
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match with the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
#if IS_NORM == 1
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5];
epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6];
ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7];
elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8];
#endif
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
#if IS_OPT == 1
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * ehpc[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * edst +
enepara_dc->w[6] * epsp[0] +
enepara_dc->w[7] * ekde[0] +
enepara_dc->w[8] * elhm[0];
#elif IS_OPT == 0
const float etotal = evdw[0] + edst;
#endif
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
| 6472e5020eb74189d05764870dfb2719a16b2765.cu | /*
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include "dock.h"
#include "gpu.cuh"
*/
/*
#define expf(a) (a)
#define powf(a,b) (a+b)
#define logf(a) (a)
#define sqrtf(a) (a)
*/
__device__ void
CalcEnergy_d (const int bidx, Ligand * __restrict__ mylig, const Protein * myprt)
{
// reduce all points on the X-Y plate
__shared__ float evdw[TperB]; // e[0]
__shared__ float eele[TperB]; // e[1]
__shared__ float epmf[TperB]; // e[2]
__shared__ float epsp[TperB]; // e[3]
__shared__ float ehdb[TperB]; // e[4]
// reduce through only x axis
__shared__ float a_val[BDy][BDx]; // reused by hpc, kde, lhm ???????
__shared__ float a_sz[BDy][BDx]; // ???????
__shared__ float ehpc[BDy]; // e[5]
__shared__ float ekde[BDy]; // e[6]
__shared__ float elhm[BDy]; // e[7]
evdw[bidx] = 0.0f;
eele[bidx] = 0.0f;
epmf[bidx] = 0.0f;
epsp[bidx] = 0.0f;
ehdb[bidx] = 0.0f;
if (bidx < BDy) {
ehpc[bidx] = 0.0f;
ekde[bidx] = 0.0f;
elhm[bidx] = 0.0f;
}
__syncthreads ();
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
const int lig_t = mylig->t[l];
// prt loop, ~300
for (int j = 0; j < pnp_dc; j += blockDim.x) {
const int p = j + threadIdx.x;
if (p < pnp_dc) {
const int prt_t = myprt->t[p];
const float dx = mylig->coord_new.x[l] - myprt->x[p];
const float dy = mylig->coord_new.y[l] - myprt->y[p];
const float dz = mylig->coord_new.z[l] - myprt->z[p];
const float dst_pow2 = dx * dx + dy * dy + dz * dz;
const float dst_pow4 = dst_pow2 * dst_pow2;
const float dst = sqrtf (dst_pow2);
/* hydrophobic potential */
if (myprt->c0_and_d12_or_c2[p] == 1 && dst_pow2 <= 81.0f) {
a_val[threadIdx.y][threadIdx.x] += myprt->hpp[p] *
(1.0f - (3.5f / 81.0f * dst_pow2 -
4.5f / 81.0f / 81.0f * dst_pow4 +
2.5f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow2 -
0.5f / 81.0f / 81.0f / 81.0f / 81.0f * dst_pow4 * dst_pow4));
}
/* L-J potential */
const float p1 = enepara_dc->p1a[lig_t][prt_t] / (dst_pow4 * dst_pow4 * dst);
const float p2 = enepara_dc->p2a[lig_t][prt_t] / (dst_pow4 * dst_pow2);
const float p4 = p1 * enepara_lj0_dc * (1.0f + enepara_lj1_dc * dst_pow2) + 1.0f;
evdw[bidx] += (p1 - p2) / p4;
/* electrostatic potential */
const float s1 = enepara_el1_dc * dst;
float g1;
if (s1 < 1)
g1 = enepara_el0_dc + enepara_a1_dc * s1 * s1 + enepara_b1_dc * s1 * s1 * s1;
else
g1 = 1.0f / s1;
eele[bidx] += mylig->c[l] * myprt->ele[p] * g1;
/* contact potential */
const float dst_minus_pmf0 = dst - enepara_dc->pmf0[lig_t][prt_t];
epmf[bidx] +=
enepara_dc->pmf1[lig_t][prt_t] /
(1.0f + expf ((-0.5f * dst + 6.0f) * dst_minus_pmf0));
/* pocket-specific potential */
// the semantics do not match with the original program:
// if (found psp[][])
// accumulate to epsp;
// else
// do nothing
if (myprt->c[p] == 2 && dst_minus_pmf0 <= 0) {
const int i1 = myprt->seq3r[p];
epsp[bidx] += psp_dc->psp[lig_t][i1]; // sparse matrix
}
/* hydrogen bond potential */
const float hdb0 = enepara_dc->hdb0[lig_t][prt_t];
if (hdb0 > 0.1f) {
const float hdb1 = enepara_dc->hdb1[lig_t][prt_t];
const float hdb3 = (dst - hdb0) * hdb1;
ehdb[bidx] += hdb1 * expf (-0.5f * hdb3 * hdb3);
}
} // if (p < pnp_dc)
} // prt loop
} // if (l < lna_dc)
/* hydrophobic restraints */
SumReduction2D_d (a_val);
// transpose may help improve the performance
if (threadIdx.x == 0 && l < lna_dc) {
const int lig_t = mylig->t[l];
const float hpc2 = (a_val[threadIdx.y][0] - enepara_dc->hpl0[lig_t]) / enepara_dc->hpl1[lig_t];
ehpc[threadIdx.y] += 0.5f * hpc2 * hpc2 - enepara_dc->hpl2[lig_t];
}
} // lig loop
SumReduction1D_5_d (bidx, evdw, eele, epmf, epsp, ehdb);
if (bidx == 0) {
float eehpc = 0.0f;
for (int i = 0; i < BDy; ++i)
eehpc += ehpc[i];
ehpc[0] = eehpc;
}
#if 1
/* kde potential */
// lig loop, ~30
for (int i = 0; i < lna_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int l = i + threadIdx.y;
if (l < lna_dc) {
// kde loop, ~400
for (int j = 0; j < pnk_dc; j += blockDim.x) {
const int k = j + threadIdx.x;
if (k < pnk_dc) {
if (mylig->t[l] == kde_dc->t[k]) {
const float dx = mylig->coord_new.x[l] - kde_dc->x[k];
const float dy = mylig->coord_new.y[l] - kde_dc->y[k];
const float dz = mylig->coord_new.z[l] - kde_dc->z[k];
const float kde_dst_pow2 = dx * dx + dy * dy + dz * dz;
a_val[threadIdx.y][threadIdx.x] += expf (enepara_kde2_dc * kde_dst_pow2);
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (k < pnk_dc)
} // kde loop
} // if (l < lna_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && l < lna_dc && a_sz[threadIdx.y][0] != 0.0f)
ekde[threadIdx.y] += (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
} // lig loop
__syncthreads ();
if (bidx == 0) {
float eekde = 0.0f;
for (int i = 0; i < BDy; ++i)
eekde += ekde[i];
eekde = eekde / enepara_kde3_dc;
ekde[0] = eekde;
}
__syncthreads ();
#endif
#if 1
/* position restraints */
// lhm loop, ~11
for (int i = 0; i < pos_dc; i += blockDim.y) {
a_val[threadIdx.y][threadIdx.x] = 0.0f;
a_sz[threadIdx.y][threadIdx.x] = 0.0f;
const int m = i + threadIdx.y;
if (m < pos_dc) {
// lig loop, ~30
for (int j = 0; j < lna_dc; j += blockDim.x) {
const int l = j + threadIdx.x;
if (l < lna_dc) {
const int lig_n = mylig->n[l] + 1;
if (mcs_dc[m].x[lig_n] != MCS_INVALID_COORD) {
const float dx = mylig->coord_new.x[l] - mcs_dc[m].x[lig_n];
const float dy = mylig->coord_new.y[l] - mcs_dc[m].y[lig_n];
const float dz = mylig->coord_new.z[l] - mcs_dc[m].z[lig_n];
a_val[threadIdx.y][threadIdx.x] += dx * dx + dy * dy + dz * dz;
a_sz[threadIdx.y][threadIdx.x] += 1.0f;
}
} // if (l < lna_dc)
} // lig loop
} // if (m < pos_dc)
SumReduction2D_2_d (a_val, a_sz);
if (threadIdx.x == 0 && m < pos_dc) {
elhm[threadIdx.y] +=
mcs_dc[m].tcc *
sqrtf (a_val[threadIdx.y][0] / a_sz[threadIdx.y][0]);
}
} // lhm loop
__syncthreads ();
if (bidx == 0) {
float eelhm = 0.0f;
for (int i = 0; i < BDy; ++i)
eelhm += elhm[i];
// dropped the protection (if pos_dc != 0)
eelhm = logf (eelhm / pos_dc);
elhm[0] = eelhm;
}
__syncthreads ();
#endif
// energy edst e[8]
__shared__ float edst;
if (bidx == 0) {
const float dx = mylig->coord_new.center[0] - myprt->pocket_center[0];
const float dy = mylig->coord_new.center[1] - myprt->pocket_center[1];
const float dz = mylig->coord_new.center[2] - myprt->pocket_center[2];
edst = sqrtf (dx * dx + dy * dy + dz * dz);
}
__syncthreads ();
if (bidx == 0) {
evdw[0] = evdw[0] / lna_dc;
eele[0] = eele[0] / lna_dc;
epmf[0] = epmf[0] / lna_dc;
epsp[0] = epsp[0] / lna_dc;
ehdb[0] = ehdb[0] / lna_dc / sqrtf (2.0f * PI) * -1.0f;
// ehdb[0] = ehdb[0] / lna_dc; // using hdb2 is faster
ehpc[0] = ehpc[0] / lna_dc;
ekde[0] = ekde[0] / lna_dc;
#if IS_NORM == 1
// calculate normalized energy
evdw[0] = enepara_dc->a_para[0] * evdw[0] + enepara_dc->b_para[0];
eele[0] = enepara_dc->a_para[1] * eele[0] + enepara_dc->b_para[1];
epmf[0] = enepara_dc->a_para[2] * epmf[0] + enepara_dc->b_para[2];
ehpc[0] = enepara_dc->a_para[3] * ehpc[0] + enepara_dc->b_para[3];
ehdb[0] = enepara_dc->a_para[4] * ehdb[0] + enepara_dc->b_para[4];
edst = enepara_dc->a_para[5] * edst + enepara_dc->b_para[5];
epsp[0] = enepara_dc->a_para[6] * epsp[0] + enepara_dc->b_para[6];
ekde[0] = enepara_dc->a_para[7] * ekde[0] + enepara_dc->b_para[7];
elhm[0] = enepara_dc->a_para[8] * elhm[0] + enepara_dc->b_para[8];
#endif
#if IS_BAYE == 1
// calculate conditional prob belonging to high decoy
const float evdw_h = NormPdf(evdw[0], VDW_NORM_HIGH_LOC, VDW_NORM_HIGH_SCALE);
const float evdw_l = NormPdf(evdw[0], VDW_NORM_LOW_LOC, VDW_NORM_LOW_SCALE);
const float eele_h = CauchyPdf(eele[0], ELE_CAUCHY_HIGH_LOC, ELE_CAUCHY_HIGH_SCALE);
const float eele_l = CauchyPdf(eele[0], ELE_CAUCHY_LOW_LOC, ELE_CAUCHY_LOW_SCALE);
const float epmf_h = LogisticPdf(epmf[0], PMF_LOGISTIC_HIGH_LOC, PMF_LOGISTIC_HIGH_SCALE);
const float epmf_l = LogisticPdf(epmf[0], PMF_LOGISTIC_LOW_LOC, PMF_LOGISTIC_LOW_SCALE);
const float ehpc_h = WaldPdf(ehpc[0], HPC_WALD_HIGH_LOC, HPC_WALD_HIGH_SCALE);
const float ehpc_l = WaldPdf(ehpc[0], HPC_WALD_LOW_LOC, HPC_WALD_LOW_SCALE);
const float ehdb_h = NormPdf(ehdb[0], HDB_NORM_HIGH_LOC, HDB_NORM_HIGH_SCALE);
const float ehdb_l = NormPdf(ehdb[0], HDB_LOGISTIC_LOW_LOC, HDB_LOGISTIC_LOW_SCALE);
const float edst_h = LogisticPdf(edst, DST_LOGISTIC_HIGH_LOC, DST_LOGISTIC_HIGH_SCALE);
const float edst_l = LogisticPdf(edst, DST_LOGISTIC_LOW_LOC, DST_LOGISTIC_LOW_SCALE);
const float epsp_h = LogisticPdf(epsp[0], PSP_LOGISTIC_HIGH_LOC, PSP_LOGISTIC_HIGH_SCALE);
const float epsp_l = LogisticPdf(epsp[0], PSP_LAPLACE_LOW_LOC, PSP_LAPLACE_LOW_SCALE);
const float ekde_h = WaldPdf(ekde[0], KDE_WALD_HIGH_LOC, KDE_WALD_HIGH_SCALE);
const float ekde_l = WaldPdf(ekde[0], KDE_WALD_LOW_LOC, KDE_WALD_LOW_SCALE);
const float elhm_h = LogisticPdf(elhm[0], LHM_LOGISTIC_HIGH_LOC, LHM_LOGISTIC_HIGH_SCALE);
const float elhm_l = LogisticPdf(elhm[0], LHM_LOGISTIC_LOW_LOC, LHM_LOGISTIC_LOW_SCALE);
// calculate conditional prob
const float prob_h = log10f(evdw_h) + log10f(eele_h) + log10f(epmf_h) + log10f(ehpc_h) + log10f(ehdb_h)
+ log10f(edst_h) + log10f(epsp_h) + log10f(ekde_h) + log10f(elhm_h);
const float prob_l = log10f(evdw_l) + log10f(eele_l) + log10f(epmf_l) + log10f(ehpc_l) + log10f(ehdb_l)
+ log10f(edst_l) + log10f(epsp_l) + log10f(ekde_l) + log10f(elhm_l);
const float etotal = prob_l - prob_h;
#elif IS_BAYE == 0
#if IS_OPT == 1
const float etotal =
enepara_dc->w[0] * evdw[0] +
enepara_dc->w[1] * eele[0] +
enepara_dc->w[2] * epmf[0] +
enepara_dc->w[3] * ehpc[0] +
enepara_dc->w[4] * ehdb[0] +
enepara_dc->w[5] * edst +
enepara_dc->w[6] * epsp[0] +
enepara_dc->w[7] * ekde[0] +
enepara_dc->w[8] * elhm[0];
#elif IS_OPT == 0
const float etotal = evdw[0] + edst;
#endif
#endif
float * e = &mylig->energy_new.e[0];
e[0] = evdw[0];
e[1] = eele[0];
e[2] = epmf[0];
e[3] = epsp[0];
e[4] = ehdb[0];
e[5] = ehpc[0];
e[6] = ekde[0];
e[7] = elhm[0];
e[8] = edst;
e[9] = etotal;
// e[9] = edst;
}
}
|
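// Restatement of the van der Waals accumulation in CalcEnergy_d above, for a
// single ligand/protein atom pair at distance d (no new source, just the same
// arithmetic written out):
//   p1 = p1a[lig_t][prt_t] / d^9
//   p2 = p2a[lig_t][prt_t] / d^6
//   evdw += (p1 - p2) / (p1 * lj0 * (1 + lj1 * d^2) + 1)
// i.e. a 9-6 Lennard-Jones-style term with a softening denominator.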
a5c39c69b1aec5a7df5922678c5d5ff0cfed2b28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pearson_cor_gpu.cuh"
__global__ void pearson_cor_kernel(double *x, double *y, unsigned int *subsamples, double *cor, unsigned int n, unsigned int p, unsigned int m, unsigned int B){
register unsigned int col_id = blockIdx.y*MAX_BLOCKS + blockIdx.x;
if(col_id < p){
register unsigned int rows_id = blockIdx.z*MAX_THREADS + threadIdx.x;
if(rows_id < B){
unsigned int i;
unsigned int k;
extern __shared__ double s_y[];
double *s_xj = &s_y[n];
unsigned int *rows = &subsamples[rows_id * m];
double sum_x = 0.0, sum_y = 0.0, sum_xy = 0.0, sum_x_sq = 0.0, sum_y_sq = 0.0;
__shared__ double *xj;
if(threadIdx.x == 0) xj = &x[col_id * n];
__syncthreads();
i = threadIdx.x;
while(i < n){
s_xj[i] = xj[i];
s_y[i] = y[i];
i += blockDim.x;
}
__syncthreads();
double yk, xjk;
for(i=0; i<m; i++){
k = rows[i]-1;
yk = s_y[k];
xjk = s_xj[k];
sum_y += yk;
sum_y_sq += yk*yk;
sum_x += xjk;
sum_xy += xjk * yk;
sum_x_sq += xjk * xjk;
}
double sd = sqrt((m*sum_x_sq - sum_x * sum_x)*(m*sum_y_sq - sum_y * sum_y));
if(sd > DBL_EPSILON) cor[rows_id * p + col_id] = fabs((m*sum_xy - (sum_x * sum_y))/(sd));
else cor[rows_id * p + col_id] = 0.0;
}
}
}
extern "C" void pearson_cor_vector_gpu(unsigned int *subsamples, unsigned int m, unsigned int B, double *x, unsigned int n, unsigned int p, double *y, double *cor){
unsigned int *d_subsamples;
double *d_x, *d_y, *d_cor;
hipError_t err;
//allocate memory on GPU TODO: check for errors
hipMalloc( (void**)&d_subsamples, m * B * sizeof(unsigned int));
hipMalloc( (void**)&d_x, n * p * sizeof(double));
hipMalloc( (void**)&d_y, n * sizeof(double));
hipMalloc( (void**)&d_cor, B * p * sizeof(double));
//transfer data to GPU
hipMemcpy(d_subsamples, subsamples,m * B * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, n * p * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, n * sizeof(double), hipMemcpyHostToDevice);
//compute correlations
/*
dim3 grid(p/THREADS_PER_BLOCK +1, B);
pearson_cor_kernel<<<grid,THREADS_PER_BLOCK>>>(d_x, d_y, d_subsamples, d_cor, n, p, m, B);
*/
dim3 grid_blocks(MAX_BLOCKS, p/MAX_BLOCKS+1, B/MAX_THREADS+1);
dim3 grid_threads(MAX_THREADS);
hipLaunchKernelGGL(( pearson_cor_kernel), dim3(grid_blocks), dim3(grid_threads), sizeof(double) * n * 2 , 0, d_x, d_y, d_subsamples, d_cor, n, p, m, B);
err = hipGetLastError();
if (err != hipSuccess) printf("Cuda error: %s\n", hipGetErrorString(err));
//transfer correlations back
hipMemcpy(cor, d_cor, B * p * sizeof(double), hipMemcpyDeviceToHost);
//Free memory
hipFree(d_subsamples);
hipFree(d_x);
hipFree(d_y);
hipFree(d_cor);
err = hipGetLastError();
if (err != hipSuccess) printf("Cuda error: %s\n", hipGetErrorString(err));
}
| a5c39c69b1aec5a7df5922678c5d5ff0cfed2b28.cu | #include "pearson_cor_gpu.cuh"
__global__ void pearson_cor_kernel(double *x, double *y, unsigned int *subsamples, double *cor, unsigned int n, unsigned int p, unsigned int m, unsigned int B){
register unsigned int col_id = blockIdx.y*MAX_BLOCKS + blockIdx.x;
if(col_id < p){
register unsigned int rows_id = blockIdx.z*MAX_THREADS + threadIdx.x;
if(rows_id < B){
unsigned int i;
unsigned int k;
extern __shared__ double s_y[];
double *s_xj = &s_y[n];
unsigned int *rows = &subsamples[rows_id * m];
double sum_x = 0.0, sum_y = 0.0, sum_xy = 0.0, sum_x_sq = 0.0, sum_y_sq = 0.0;
__shared__ double *xj;
if(threadIdx.x == 0) xj = &x[col_id * n];
__syncthreads();
i = threadIdx.x;
while(i < n){
s_xj[i] = xj[i];
s_y[i] = y[i];
i += blockDim.x;
}
__syncthreads();
double yk, xjk;
for(i=0; i<m; i++){
k = rows[i]-1;
yk = s_y[k];
xjk = s_xj[k];
sum_y += yk;
sum_y_sq += yk*yk;
sum_x += xjk;
sum_xy += xjk * yk;
sum_x_sq += xjk * xjk;
}
double sd = sqrt((m*sum_x_sq - sum_x * sum_x)*(m*sum_y_sq - sum_y * sum_y));
if(sd > DBL_EPSILON) cor[rows_id * p + col_id] = fabs((m*sum_xy - (sum_x * sum_y))/(sd));
else cor[rows_id * p + col_id] = 0.0;
}
}
}
extern "C" void pearson_cor_vector_gpu(unsigned int *subsamples, unsigned int m, unsigned int B, double *x, unsigned int n, unsigned int p, double *y, double *cor){
unsigned int *d_subsamples;
double *d_x, *d_y, *d_cor;
cudaError_t err;
//allocate memory on GPU TODO: check for errors
cudaMalloc( (void**)&d_subsamples, m * B * sizeof(unsigned int));
cudaMalloc( (void**)&d_x, n * p * sizeof(double));
cudaMalloc( (void**)&d_y, n * sizeof(double));
cudaMalloc( (void**)&d_cor, B * p * sizeof(double));
//transfer data to GPU
cudaMemcpy(d_subsamples, subsamples,m * B * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, n * p * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, n * sizeof(double), cudaMemcpyHostToDevice);
//compute correlations
/*
dim3 grid(p/THREADS_PER_BLOCK +1, B);
pearson_cor_kernel<<<grid,THREADS_PER_BLOCK>>>(d_x, d_y, d_subsamples, d_cor, n, p, m, B);
*/
dim3 grid_blocks(MAX_BLOCKS, p/MAX_BLOCKS+1, B/MAX_THREADS+1);
dim3 grid_threads(MAX_THREADS);
pearson_cor_kernel<<<grid_blocks, grid_threads, sizeof(double) * n * 2 >>>(d_x, d_y, d_subsamples, d_cor, n, p, m, B);
err = cudaGetLastError();
if (err != cudaSuccess) printf("Cuda error: %s\n", cudaGetErrorString(err));
//transfer correlations back
cudaMemcpy(cor, d_cor, B * p * sizeof(double), cudaMemcpyDeviceToHost);
//Free memory
cudaFree(d_subsamples);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_cor);
err = cudaGetLastError();
if (err != cudaSuccess) printf("Cuda error: %s\n", cudaGetErrorString(err));
}
|
e887bfbbe0e7843cc11b2d9a09457cf772d89a26.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ep/include/primitive/copy_nd.h"
#include "oneflow/core/ep/common/primitive/copy_nd.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <hip/hip_runtime.h>
namespace oneflow {
namespace ep {
namespace primitive {
namespace {
template<size_t num_dims, size_t movement_size, typename IndexType>
__global__ void CopyNdKernel(CopyNdKernelParams<num_dims, IndexType> params) {
using T = typename std::aligned_storage<movement_size, movement_size>::type;
const T* src = reinterpret_cast<const T*>(params.src);
T* dst = reinterpret_cast<T*>(params.dst);
IndexType copy_index[num_dims];
IndexType src_index[num_dims];
IndexType dst_index[num_dims];
CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) {
params.copy_index_helper.OffsetToNdIndex(i, copy_index);
#pragma unroll
for (size_t j = 0; j < num_dims; ++j) {
src_index[j] = params.src_pos[j] + copy_index[j];
dst_index[j] = params.dst_pos[j] + copy_index[j];
}
const IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index);
const IndexType dst_offset = params.dst_index_helper.NdIndexToOffset(dst_index);
dst[dst_offset] = src[src_offset];
}
}
template<size_t num_dims, size_t movement_size, typename IndexType>
void LaunchKernel(Stream* stream, CopyNdKernelParams<num_dims, IndexType> params) {
hipStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream();
hipLaunchKernelGGL(( CopyNdKernel<num_dims, movement_size, IndexType>)
, dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params);
}
class CopyNdImpl : public CopyNd {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdImpl);
CopyNdImpl() = default;
~CopyNdImpl() override = default;
void Launch(Stream* stream, DataType data_type, size_t num_dims, void* dst,
const int64_t* dst_dims, const int64_t* dst_pos, const void* src,
const int64_t* src_dims, const int64_t* src_pos,
const int64_t* extent) const override {
SimplifyThenLaunch(stream, data_type, num_dims, dst, dst_dims, dst_pos, src, src_dims, src_pos,
extent);
}
};
class CopyNdFactoryImpl : public CopyNdFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdFactoryImpl);
CopyNdFactoryImpl() = default;
~CopyNdFactoryImpl() override = default;
std::unique_ptr<CopyNd> New(size_t max_num_dims) override {
if (max_num_dims <= kMaxNumDims) {
return std::unique_ptr<CopyNd>(new CopyNdImpl());
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, CopyNdFactory, CopyNdFactoryImpl);
} // namespace
} // namespace primitive
} // namespace ep
} // namespace oneflow
| e887bfbbe0e7843cc11b2d9a09457cf772d89a26.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ep/include/primitive/copy_nd.h"
#include "oneflow/core/ep/common/primitive/copy_nd.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include <cuda_runtime.h>
namespace oneflow {
namespace ep {
namespace primitive {
namespace {
template<size_t num_dims, size_t movement_size, typename IndexType>
__global__ void CopyNdKernel(CopyNdKernelParams<num_dims, IndexType> params) {
using T = typename std::aligned_storage<movement_size, movement_size>::type;
const T* src = reinterpret_cast<const T*>(params.src);
T* dst = reinterpret_cast<T*>(params.dst);
IndexType copy_index[num_dims];
IndexType src_index[num_dims];
IndexType dst_index[num_dims];
CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) {
params.copy_index_helper.OffsetToNdIndex(i, copy_index);
#pragma unroll
for (size_t j = 0; j < num_dims; ++j) {
src_index[j] = params.src_pos[j] + copy_index[j];
dst_index[j] = params.dst_pos[j] + copy_index[j];
}
const IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index);
const IndexType dst_offset = params.dst_index_helper.NdIndexToOffset(dst_index);
dst[dst_offset] = src[src_offset];
}
}
template<size_t num_dims, size_t movement_size, typename IndexType>
void LaunchKernel(Stream* stream, CopyNdKernelParams<num_dims, IndexType> params) {
cudaStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream();
CopyNdKernel<num_dims, movement_size, IndexType>
<<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params);
}
class CopyNdImpl : public CopyNd {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdImpl);
CopyNdImpl() = default;
~CopyNdImpl() override = default;
void Launch(Stream* stream, DataType data_type, size_t num_dims, void* dst,
const int64_t* dst_dims, const int64_t* dst_pos, const void* src,
const int64_t* src_dims, const int64_t* src_pos,
const int64_t* extent) const override {
SimplifyThenLaunch(stream, data_type, num_dims, dst, dst_dims, dst_pos, src, src_dims, src_pos,
extent);
}
};
class CopyNdFactoryImpl : public CopyNdFactory {
public:
OF_DISALLOW_COPY_AND_MOVE(CopyNdFactoryImpl);
CopyNdFactoryImpl() = default;
~CopyNdFactoryImpl() override = default;
std::unique_ptr<CopyNd> New(size_t max_num_dims) override {
if (max_num_dims <= kMaxNumDims) {
return std::unique_ptr<CopyNd>(new CopyNdImpl());
} else {
return nullptr;
}
}
};
REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, CopyNdFactory, CopyNdFactoryImpl);
} // namespace
} // namespace primitive
} // namespace ep
} // namespace oneflow
|
22fdba32e8a85ba1387001dc9fdb8bcd369fcdc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**************************************************************************//**
*
* \file cumomentumtrainer.cu
* \author Daniel Strigl, Klaus Kofler
* \date Jul 13 2009
*
* $Id: cumomentumtrainer.cu 3558 2010-11-22 11:04:51Z klaus $
*
* \brief Implementation of cnnplus::CuMomentumTrainer.
*
*****************************************************************************/
#include "cudautils.hh"
///////////////////////////////////////////////////////////////////////////////
// CUDA kernels
__global__ void
updateW_kernel(float * const weights, size_t const strideWeights,
float const * const dWeights, size_t const strideDWeights,
float const * const mask, size_t const strideMask,
float * const deltaW, size_t const strideDeltaW,
size_t const rows, size_t const cols,
float const eta, float const alpha)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: deltaW = -eta * dWeights + alpha * deltaW
deltaW[CNN_UIMUL(r, strideDeltaW) + c] =
-eta * dWeights[CNN_UIMUL(r, strideDWeights) + c] +
alpha * deltaW [CNN_UIMUL(r, strideDeltaW ) + c];
// Compute: deltaW = deltaW .* mask
if (mask)
deltaW[CNN_UIMUL(r, strideDeltaW) + c] *= mask[CNN_UIMUL(r, strideMask) + c];
// Compute: weights = weights + deltaW
weights[CNN_UIMUL(r, strideWeights) + c] += deltaW[CNN_UIMUL(r, strideDeltaW) + c];
}
__global__ void
updateB_kernel(float * const biases,
float const * const dBiases,
float * const deltaB,
size_t const len,
float const eta, float const alpha)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
// Compute: deltaB = -eta * dBiases + alpha * deltaB
deltaB[i] = -eta * dBiases[i] + alpha * deltaB[i];
// Compute: biases = biases + deltaB
biases[i] += deltaB[i];
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#include "cumomentumtrainer.hh"
#include "cuneuralnet.hh"
#include "datasource.hh"
#include "error.hh"
#include "matvecli.hh"
#include "cumvli.hh"
#include <sstream>
#include <cstdio>
CNNPLUS_NS_BEGIN
///////////////////////////////////////////////////////////////////////////////
// CUDA kernel calls
template<typename T> void
update(typename NeuralNet<T>::TrainableParam const & trainableParam,
std::vector<T *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<T *> const & deltaB,
T const eta, T const alpha);
template<> void
update<float>(NeuralNet<float>::TrainableParam const & trainableParam,
std::vector<float *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<float *> const & deltaB,
float const eta, float const alpha)
{
for (size_t i = 0; i < trainableParam.size(); ++i)
{
//
// Update weights
//
{
size_t const rows = trainableParam[i].weights.rows;
size_t const cols = trainableParam[i].weights.cols;
float * const weights = trainableParam[i].weights.val;
float const * const dWeights = trainableParam[i].weights.dVal;
float const * const mask = trainableParam[i].weights.mask;
size_t const strideWeights = trainableParam[i].weights.strideVal;
size_t const strideDWeights = trainableParam[i].weights.strideDVal;
size_t const strideMask = trainableParam[i].weights.strideMask;
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( updateW_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
weights, strideWeights, dWeights, strideDWeights,
mask, strideMask, deltaW[i], strideDeltaW[i],
rows, cols, eta, alpha);
CUDA_CHECK_ERROR("Kernel call 'updateW_kernel' failed");
}
//
// Update biases
//
{
size_t const len = trainableParam[i].biases.len;
float * const biases = trainableParam[i].biases.val;
float const * const dBiases = trainableParam[i].biases.dVal;
hipLaunchKernelGGL(( updateB_kernel), dim3((len + THREADS - 1) / THREADS), dim3(THREADS), 0, 0,
biases, dBiases, deltaB[i], len, eta, alpha);
CUDA_CHECK_ERROR("Kernel call 'updateB_kernel' failed");
}
}
}
template<> void
update<double>(NeuralNet<double>::TrainableParam const & trainableParam,
std::vector<double *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<double *> const & deltaB,
double const eta, double const alpha)
{
throw NotImplementedError("Not yet supported [CUDA<double>].");
}
///////////////////////////////////////////////////////////////////////////////
// CuMomentumTrainer implementation
template<typename T, class ErrFnc> bool
CuMomentumTrainer<T, ErrFnc>::Adapter::update(CuMomentumTrainer<T, ErrFnc> & trainer,
DataSource<T> & ds, size_t const epoch, int & batchSize, T & eta, T & alpha)
{
double errRate = 0;
T const error = trainer.test(ds, errRate);
printf("# epoch: %04d, error: %10.8f, error-rate: %8.4f%%\n",
epoch, error, errRate);
fflush(stdout);
return (epoch < maxEpochs_);
}
template<typename T, class ErrFnc> void
CuMomentumTrainer<T, ErrFnc>::init()
{
CNNPLUS_ASSERT(trainableParam_.empty());
this->net_.trainableParam(trainableParam_);
deltaW_.resize(trainableParam_.size());
strideDeltaW_.resize(trainableParam_.size());
deltaB_.resize(trainableParam_.size());
for (size_t i = 0; i < trainableParam_.size(); ++i)
{
// Weights
deltaW_[i] = cumvli::allocm<T>(
trainableParam_[i].weights.rows,
trainableParam_[i].weights.cols,
strideDeltaW_[i]);
cumvli::zerom<T>(
deltaW_[i], strideDeltaW_[i],
trainableParam_[i].weights.rows,
trainableParam_[i].weights.cols);
// Biases
deltaB_[i] = cumvli::allocv<T>(
trainableParam_[i].biases.len);
cumvli::zerov<T>(
deltaB_[i], trainableParam_[i].biases.len);
}
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, int const batchSize, T const eta, T const alpha, Adapter & adapter)
: Trainer<T, ErrFnc>(net), adapter_(&adapter), delAdapter_(false),
batchSize_(batchSize), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (batchSize <= 0)
throw ParameterError("batchSize", "must be greater zero.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, int const batchSize, T const eta, T const alpha, size_t const maxEpochs)
: Trainer<T, ErrFnc>(net), adapter_(new Adapter(maxEpochs)), delAdapter_(true),
batchSize_(batchSize), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (batchSize <= 0)
throw ParameterError("batchSize", "must be greater zero.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, T const eta, T const alpha, Adapter & adapter)
: Trainer<T, ErrFnc>(net), adapter_(&adapter), delAdapter_(false),
batchSize_(0), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, T const eta, T const alpha, size_t const maxEpochs)
: Trainer<T, ErrFnc>(net), adapter_(new Adapter(maxEpochs)), delAdapter_(true),
batchSize_(0), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::~CuMomentumTrainer()
{
if (delAdapter_) delete adapter_;
for (size_t i = 0; i < trainableParam_.size(); ++i)
{
cumvli::free<T>(deltaW_[i]);
cumvli::free<T>(deltaB_[i]);
}
}
template<typename T, class ErrFnc> void
CuMomentumTrainer<T, ErrFnc>::train(DataSource<T> & ds)
{
if (ds.sizeOut() != this->net_.sizeIn())
throw ParameterError("ds", "size doesn't match.");
// Reset gradients to zero
this->net_.reset();
// Set vector 'des_' to negative target value
if (this->net_.sizeOut() > 1)
matvecli::setv<T>(this->des_, this->net_.sizeOut(), this->targetVal_.NEG());
// Loop over epochs until 'adapter_->update(...)' returns 'false'
for (size_t epoch = 0; adapter_->update(*this, ds, epoch, batchSize_, eta_, alpha_); ++epoch)
{
ds.shuffle();
ds.rewind();
// Loop over all patterns
for (int i = 1; i <= ds.size(); ds.next(), ++i)
{
// Read pattern and desired label from data source
int const desLbl = ds.fprop(this->in_);
// Compute neural-net output
this->net_.fprop(this->in_, this->out_);
// Compute error
if (this->net_.sizeOut() > 1) {
CNNPLUS_ASSERT(0 <= desLbl && desLbl < static_cast<int>(this->net_.sizeOut()));
this->des_[desLbl] = this->targetVal_.POS();
this->errFnc_.fprop(this->out_, this->des_);
this->des_[desLbl] = this->targetVal_.NEG();
}
else {
this->des_[0] = static_cast<T>(desLbl);
this->errFnc_.fprop(this->out_, this->des_);
}
// Backpropagate error through network
this->errFnc_.bprop(this->out_);
this->net_.bprop(NULL, this->out_, true);
// Updates weights and biases
if ((batchSize_ > 0 && i % batchSize_ == 0) || (i == ds.size())) {
update<T>(trainableParam_, deltaW_, strideDeltaW_, deltaB_, eta_, alpha_);
this->net_.reset();
}
#ifdef CNNPLUS_PRINT_PROGRESS
printf("train %.2f%%\r", i * 100.0 / ds.size());
fflush(stdout);
#endif // CNNPLUS_PRINT_PROGRESS
}
#ifdef CNNPLUS_PRINT_PROGRESS
printf(" \r");
fflush(stdout);
#endif // CNNPLUS_PRINT_PROGRESS
}
}
template<typename T, class ErrFnc> std::string
CuMomentumTrainer<T, ErrFnc>::toString() const
{
std::stringstream ss;
ss << "CuMomentumTrainer["
<< this->errFnc_.toString()
<< "; targetVal=("
<< this->targetVal_.NEG() << ","
<< this->targetVal_.POS() << ")"
<< ", batchSize=" << batchSize_
<< ", eta=" << eta_
<< ", alpha=" << alpha_ << "]";
return ss.str();
}
/*! \addtogroup eti_grp Explicit Template Instantiation
@{
*/
template class CuMomentumTrainer< float, MeanSquaredError<float> >;
template class CuMomentumTrainer< double, MeanSquaredError<double> >;
template class CuMomentumTrainer< float, CrossEntropy<float> >;
template class CuMomentumTrainer< double, CrossEntropy<double> >;
/*! @} */
CNNPLUS_NS_END
| 22fdba32e8a85ba1387001dc9fdb8bcd369fcdc6.cu | /**************************************************************************//**
*
* \file cumomentumtrainer.cu
* \author Daniel Strigl, Klaus Kofler
* \date Jul 13 2009
*
* $Id: cumomentumtrainer.cu 3558 2010-11-22 11:04:51Z klaus $
*
* \brief Implementation of cnnplus::CuMomentumTrainer.
*
*****************************************************************************/
#include "cudautils.hh"
///////////////////////////////////////////////////////////////////////////////
// CUDA kernels
__global__ void
updateW_kernel(float * const weights, size_t const strideWeights,
float const * const dWeights, size_t const strideDWeights,
float const * const mask, size_t const strideMask,
float * const deltaW, size_t const strideDeltaW,
size_t const rows, size_t const cols,
float const eta, float const alpha)
{
size_t const r = CNN_UIMUL(blockIdx.y, blockDim.y) + threadIdx.y;
size_t const c = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (r >= rows || c >= cols)
return;
// Compute: deltaW = -eta * dWeights + alpha * deltaW
deltaW[CNN_UIMUL(r, strideDeltaW) + c] =
-eta * dWeights[CNN_UIMUL(r, strideDWeights) + c] +
alpha * deltaW [CNN_UIMUL(r, strideDeltaW ) + c];
// Compute: deltaW = deltaW .* mask
if (mask)
deltaW[CNN_UIMUL(r, strideDeltaW) + c] *= mask[CNN_UIMUL(r, strideMask) + c];
// Compute: weights = weights + deltaW
weights[CNN_UIMUL(r, strideWeights) + c] += deltaW[CNN_UIMUL(r, strideDeltaW) + c];
}
__global__ void
updateB_kernel(float * const biases,
float const * const dBiases,
float * const deltaB,
size_t const len,
float const eta, float const alpha)
{
size_t const i = CNN_UIMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if (i >= len)
return;
// Compute: deltaB = -eta * dBiases + alpha * deltaB
deltaB[i] = -eta * dBiases[i] + alpha * deltaB[i];
// Compute: biases = biases + deltaB
biases[i] += deltaB[i];
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#include "cumomentumtrainer.hh"
#include "cuneuralnet.hh"
#include "datasource.hh"
#include "error.hh"
#include "matvecli.hh"
#include "cumvli.hh"
#include <sstream>
#include <cstdio>
CNNPLUS_NS_BEGIN
///////////////////////////////////////////////////////////////////////////////
// CUDA kernel calls
template<typename T> void
update(typename NeuralNet<T>::TrainableParam const & trainableParam,
std::vector<T *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<T *> const & deltaB,
T const eta, T const alpha);
template<> void
update<float>(NeuralNet<float>::TrainableParam const & trainableParam,
std::vector<float *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<float *> const & deltaB,
float const eta, float const alpha)
{
for (size_t i = 0; i < trainableParam.size(); ++i)
{
//
// Update weights
//
{
size_t const rows = trainableParam[i].weights.rows;
size_t const cols = trainableParam[i].weights.cols;
float * const weights = trainableParam[i].weights.val;
float const * const dWeights = trainableParam[i].weights.dVal;
float const * const mask = trainableParam[i].weights.mask;
size_t const strideWeights = trainableParam[i].weights.strideVal;
size_t const strideDWeights = trainableParam[i].weights.strideDVal;
size_t const strideMask = trainableParam[i].weights.strideMask;
dim3 const dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 const dimGrid((cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y);
updateW_kernel<<<dimGrid, dimBlock>>>(
weights, strideWeights, dWeights, strideDWeights,
mask, strideMask, deltaW[i], strideDeltaW[i],
rows, cols, eta, alpha);
CUDA_CHECK_ERROR("Kernel call 'updateW_kernel' failed");
}
//
// Update biases
//
{
size_t const len = trainableParam[i].biases.len;
float * const biases = trainableParam[i].biases.val;
float const * const dBiases = trainableParam[i].biases.dVal;
updateB_kernel<<<(len + THREADS - 1) / THREADS, THREADS>>>
(biases, dBiases, deltaB[i], len, eta, alpha);
CUDA_CHECK_ERROR("Kernel call 'updateB_kernel' failed");
}
}
}
template<> void
update<double>(NeuralNet<double>::TrainableParam const & trainableParam,
std::vector<double *> const & deltaW,
std::vector<size_t> const & strideDeltaW,
std::vector<double *> const & deltaB,
double const eta, double const alpha)
{
throw NotImplementedError("Not yet supported [CUDA<double>].");
}
///////////////////////////////////////////////////////////////////////////////
// CuMomentumTrainer implementation
template<typename T, class ErrFnc> bool
CuMomentumTrainer<T, ErrFnc>::Adapter::update(CuMomentumTrainer<T, ErrFnc> & trainer,
DataSource<T> & ds, size_t const epoch, int & batchSize, T & eta, T & alpha)
{
double errRate = 0;
T const error = trainer.test(ds, errRate);
printf("# epoch: %04d, error: %10.8f, error-rate: %8.4f%%\n",
epoch, error, errRate);
fflush(stdout);
return (epoch < maxEpochs_);
}
template<typename T, class ErrFnc> void
CuMomentumTrainer<T, ErrFnc>::init()
{
CNNPLUS_ASSERT(trainableParam_.empty());
this->net_.trainableParam(trainableParam_);
deltaW_.resize(trainableParam_.size());
strideDeltaW_.resize(trainableParam_.size());
deltaB_.resize(trainableParam_.size());
for (size_t i = 0; i < trainableParam_.size(); ++i)
{
// Weights
deltaW_[i] = cumvli::allocm<T>(
trainableParam_[i].weights.rows,
trainableParam_[i].weights.cols,
strideDeltaW_[i]);
cumvli::zerom<T>(
deltaW_[i], strideDeltaW_[i],
trainableParam_[i].weights.rows,
trainableParam_[i].weights.cols);
// Biases
deltaB_[i] = cumvli::allocv<T>(
trainableParam_[i].biases.len);
cumvli::zerov<T>(
deltaB_[i], trainableParam_[i].biases.len);
}
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, int const batchSize, T const eta, T const alpha, Adapter & adapter)
: Trainer<T, ErrFnc>(net), adapter_(&adapter), delAdapter_(false),
batchSize_(batchSize), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (batchSize <= 0)
throw ParameterError("batchSize", "must be greater zero.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, int const batchSize, T const eta, T const alpha, size_t const maxEpochs)
: Trainer<T, ErrFnc>(net), adapter_(new Adapter(maxEpochs)), delAdapter_(true),
batchSize_(batchSize), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (batchSize <= 0)
throw ParameterError("batchSize", "must be greater zero.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, T const eta, T const alpha, Adapter & adapter)
: Trainer<T, ErrFnc>(net), adapter_(&adapter), delAdapter_(false),
batchSize_(0), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::CuMomentumTrainer(
NeuralNet<T> & net, T const eta, T const alpha, size_t const maxEpochs)
: Trainer<T, ErrFnc>(net), adapter_(new Adapter(maxEpochs)), delAdapter_(true),
batchSize_(0), eta_(eta), alpha_(alpha)
{
if (!dynamic_cast<CuNeuralNet<T>*>(&net))
throw ParameterError("net", "no CUDA enabled neural-net.");
else if (eta <= 0)
throw ParameterError("eta", "must be greater zero.");
else if (alpha < 0 || alpha > 1)
throw ParameterError("alpha", "must be between 0 and 1.");
init();
}
template<typename T, class ErrFnc>
CuMomentumTrainer<T, ErrFnc>::~CuMomentumTrainer()
{
if (delAdapter_) delete adapter_;
for (size_t i = 0; i < trainableParam_.size(); ++i)
{
cumvli::free<T>(deltaW_[i]);
cumvli::free<T>(deltaB_[i]);
}
}
template<typename T, class ErrFnc> void
CuMomentumTrainer<T, ErrFnc>::train(DataSource<T> & ds)
{
if (ds.sizeOut() != this->net_.sizeIn())
throw ParameterError("ds", "size doesn't match.");
// Reset gradients to zero
this->net_.reset();
// Set vector 'des_' to negative target value
if (this->net_.sizeOut() > 1)
matvecli::setv<T>(this->des_, this->net_.sizeOut(), this->targetVal_.NEG());
// Loop over epochs until 'adapter_->update(...)' returns 'false'
for (size_t epoch = 0; adapter_->update(*this, ds, epoch, batchSize_, eta_, alpha_); ++epoch)
{
ds.shuffle();
ds.rewind();
// Loop over all patterns
for (int i = 1; i <= ds.size(); ds.next(), ++i)
{
// Read pattern and desired label from data source
int const desLbl = ds.fprop(this->in_);
// Compute neural-net output
this->net_.fprop(this->in_, this->out_);
// Compute error
if (this->net_.sizeOut() > 1) {
CNNPLUS_ASSERT(0 <= desLbl && desLbl < static_cast<int>(this->net_.sizeOut()));
this->des_[desLbl] = this->targetVal_.POS();
this->errFnc_.fprop(this->out_, this->des_);
this->des_[desLbl] = this->targetVal_.NEG();
}
else {
this->des_[0] = static_cast<T>(desLbl);
this->errFnc_.fprop(this->out_, this->des_);
}
// Backpropagate error through network
this->errFnc_.bprop(this->out_);
this->net_.bprop(NULL, this->out_, true);
// Updates weights and biases
if ((batchSize_ > 0 && i % batchSize_ == 0) || (i == ds.size())) {
update<T>(trainableParam_, deltaW_, strideDeltaW_, deltaB_, eta_, alpha_);
this->net_.reset();
}
#ifdef CNNPLUS_PRINT_PROGRESS
printf("train %.2f%%\r", i * 100.0 / ds.size());
fflush(stdout);
#endif // CNNPLUS_PRINT_PROGRESS
}
#ifdef CNNPLUS_PRINT_PROGRESS
printf(" \r");
fflush(stdout);
#endif // CNNPLUS_PRINT_PROGRESS
}
}
template<typename T, class ErrFnc> std::string
CuMomentumTrainer<T, ErrFnc>::toString() const
{
std::stringstream ss;
ss << "CuMomentumTrainer["
<< this->errFnc_.toString()
<< "; targetVal=("
<< this->targetVal_.NEG() << ","
<< this->targetVal_.POS() << ")"
<< ", batchSize=" << batchSize_
<< ", eta=" << eta_
<< ", alpha=" << alpha_ << "]";
return ss.str();
}
/*! \addtogroup eti_grp Explicit Template Instantiation
@{
*/
template class CuMomentumTrainer< float, MeanSquaredError<float> >;
template class CuMomentumTrainer< double, MeanSquaredError<double> >;
template class CuMomentumTrainer< float, CrossEntropy<float> >;
template class CuMomentumTrainer< double, CrossEntropy<double> >;
/*! @} */
CNNPLUS_NS_END
|
e1f34c4846d8af88bee2f28329debc4b2c5c580a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
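// --- Illustrative host sketch (added for clarity; not part of the original assignment code) ---
// A serial version of the steps described in the comment above: (1) min/max, (2) range,
// (3) histogram, (4) scan of the histogram. It produces the inclusive CDF shown in the
// worked example ([4 7 3] -> [4 11 14]); the device code further below shifts the result
// by one element to obtain the exclusive CDF. The helper name reference_histogram_cdf is
// hypothetical and assumes n > 0 and a non-constant input.
static void reference_histogram_cdf(const float *lum, int n,
                                    unsigned int *histo, unsigned int *cdf,
                                    int numBins)
{
    float lumMin = lum[0], lumMax = lum[0];
    for (int i = 1; i < n; ++i) {                       // step 1: min / max
        if (lum[i] < lumMin) lumMin = lum[i];
        if (lum[i] > lumMax) lumMax = lum[i];
    }
    float lumRange = lumMax - lumMin;                   // step 2: range
    for (int b = 0; b < numBins; ++b) histo[b] = 0;
    for (int i = 0; i < n; ++i) {                       // step 3: histogram
        int bin = (int)((lum[i] - lumMin) / lumRange * numBins);
        if (bin >= numBins) bin = numBins - 1;          // clamp the maximum value
        ++histo[bin];
    }
    unsigned int running = 0;                           // step 4: inclusive scan (CDF)
    for (int b = 0; b < numBins; ++b) {
        running += histo[b];
        cdf[b] = running;
    }
}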
__device__
float reduce_op(float a, float b, int fn)
{
if (fn == 0)
return min(a, b);
else
return max(a, b);
}
// alternative manner for block reduce
__global__
void block_reduce_fn(const float* const in_arr, float* const out_arr, int fn)
{
extern __shared__ float s[];
int r = threadIdx.x;
int c = blockIdx.x;
int idx_1D = r + c * blockDim.x;
s[r] = in_arr[idx_1D];
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if (r % (stride * 2) == 0 && r + stride < blockDim.x)
{
s[r] = reduce_op(s[r + stride], s[r], fn);
}
__syncthreads();
}
if (r == 0)
out_arr[c] = s[r];
}
__device__
void reduce_fn(float* s, int ofst, int fn, float* const out_var)
{
for (; ofst >= 1; ofst /= 2)
{
if (threadIdx.x + ofst < blockDim.x)
{
s[threadIdx.x] = reduce_op(s[threadIdx.x + ofst], s[threadIdx.x], fn);
}
__syncthreads();
}
if (threadIdx.x == 0)
*out_var = s[threadIdx.x];
}
__global__
void reduce_fn_inner(const float* const in_arr, float* const out_var, int fn, int parent_idx)
{
extern __shared__ float s[];
int idx_1D = threadIdx.x + parent_idx * blockDim.x;
s[threadIdx.x] = in_arr[idx_1D];
__syncthreads();
// at most reduce 1024 elements per block
reduce_fn(s, 512, fn, out_var);
}
__global__
void reduce_fn_outer(const float* const in_arr, float* const buff_arr, int child_dim, int fn)
{
reduce_fn_inner << <1, child_dim, child_dim * sizeof(float) >> > (in_arr, buff_arr + threadIdx.x, fn, threadIdx.x);
hipDeviceSynchronize();
__syncthreads();
extern __shared__ float s[];
s[threadIdx.x] = buff_arr[threadIdx.x];
__syncthreads();
// at most reduce 1024 elements per block
reduce_fn(s, 512, fn, buff_arr + blockIdx.x);
}
__global__
void histogram_atomic(const float* const d_logLuminance,
unsigned int* const d_histo,
const size_t numBins,
const float min_logLum,
const float lumRange)
{
int r = threadIdx.x;
int c = blockIdx.x;
int idx_1D = r + c * blockDim.x;
int bin = (d_logLuminance[idx_1D] - min_logLum) / lumRange * numBins;
if (bin >= (int)numBins) bin = (int)numBins - 1; // clamp the maximum luminance into the last bin
atomicAdd(&d_histo[bin], 1);
}
__global__
void block_scan_inclusive(unsigned int* const d_histo)
{
    int idx_1D = threadIdx.x + blockIdx.x * blockDim.x;
    // block scan (Hillis-Steele): read the neighbour's value before any thread
    // overwrites it, then add after a barrier, to avoid a read/write race
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        unsigned int val = 0;
        if (threadIdx.x >= stride)
            val = d_histo[idx_1D - stride];
        __syncthreads();
        if (threadIdx.x >= stride)
            d_histo[idx_1D] += val;
        __syncthreads();
    }
}
__global__
void global_scan_inclusive(unsigned int* const d_histo)
{
int idx_1D = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int block_ofst;
if (threadIdx.x == 0)
{
block_ofst = 0;
for (int i = 0; i < blockIdx.x; i++)
{
block_ofst += d_histo[(i + 1) * blockDim.x - 1];
}
}
__syncthreads();
d_histo[idx_1D] += block_ofst;
}
//Run: ./<exe_file> memorial_raw_large.png
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// step 1
float* d_reduce_min;
float* d_reduce_max;
checkCudaErrors(hipMalloc(&d_reduce_min, numCols * sizeof(float)));
checkCudaErrors(hipMalloc(&d_reduce_max, numCols * sizeof(float)));
// reduce implementation 1
block_reduce_fn << <numCols, numRows, numRows * sizeof(float) >> > (d_logLuminance, d_reduce_min, 0);
block_reduce_fn << <1, numCols, numCols * sizeof(float) >> > (d_reduce_min, d_reduce_min, 0);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipMemcpy(&min_logLum, d_reduce_min, sizeof(float), hipMemcpyDeviceToHost);
// reduce implementation 2
reduce_fn_outer << <1, numRows, numRows * sizeof(float) >> > (d_logLuminance, d_reduce_max, numCols, 1);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipMemcpy(&max_logLum, d_reduce_max, sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(hipFree(d_reduce_min));
checkCudaErrors(hipFree(d_reduce_max));
// step 2, 3
float lumRange = max_logLum - min_logLum;
unsigned int* d_histo;
checkCudaErrors(hipMalloc(&d_histo, numBins * sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_histo, 0, numBins * sizeof(unsigned int)));
histogram_atomic << <numCols, numRows >> > (d_logLuminance, d_histo, numBins, min_logLum, lumRange);
// step 4
block_scan_inclusive << <4, numBins / 4 >> > (d_histo);
global_scan_inclusive << <4, numBins / 4 >> > (d_histo);
checkCudaErrors(hipMemset(d_cdf, 0, sizeof(unsigned int))); // exclusive scan: first CDF entry is 0
checkCudaErrors(hipMemcpy(&d_cdf[1], d_histo, (numBins - 1) * sizeof(unsigned int), hipMemcpyDeviceToDevice));
/*
unsigned int* h_tmp = new unsigned int[numBins];
checkCudaErrors(hipMemcpy(h_tmp, d_cdf, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost));
for (int i = 0; i < numBins; i++)
{
printf("%u ", h_tmp[i]);
}
delete[] h_tmp;
*/
checkCudaErrors(hipFree(d_histo));
}
| e1f34c4846d8af88bee2f28329debc4b2c5c580a.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
__device__
float reduce_op(float a, float b, int fn)
{
if (fn == 0)
return min(a, b);
else
return max(a, b);
}
// alternative manner for block reduce
__global__
void block_reduce_fn(const float* const in_arr, float* const out_arr, int fn)
{
extern __shared__ float s[];
int r = threadIdx.x;
int c = blockIdx.x;
int idx_1D = r + c * blockDim.x;
s[r] = in_arr[idx_1D];
__syncthreads();
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if (r % (stride * 2) == 0 && r + stride < blockDim.x)
{
s[r] = reduce_op(s[r + stride], s[r], fn);
}
__syncthreads();
}
if (r == 0)
out_arr[c] = s[r];
}
__device__
void reduce_fn(float* s, int ofst, int fn, float* const out_var)
{
for (; ofst >= 1; ofst /= 2)
{
if (threadIdx.x + ofst < blockDim.x)
{
s[threadIdx.x] = reduce_op(s[threadIdx.x + ofst], s[threadIdx.x], fn);
}
__syncthreads();
}
if (threadIdx.x == 0)
*out_var = s[threadIdx.x];
}
__global__
void reduce_fn_inner(const float* const in_arr, float* const out_var, int fn, int parent_idx)
{
extern __shared__ float s[];
int idx_1D = threadIdx.x + parent_idx * blockDim.x;
s[threadIdx.x] = in_arr[idx_1D];
__syncthreads();
// at most reduce 1024 elements per block
reduce_fn(s, 512, fn, out_var);
}
__global__
void reduce_fn_outer(const float* const in_arr, float* const buff_arr, int child_dim, int fn)
{
reduce_fn_inner << <1, child_dim, child_dim * sizeof(float) >> > (in_arr, buff_arr + threadIdx.x, fn, threadIdx.x);
cudaDeviceSynchronize();
__syncthreads();
extern __shared__ float s[];
s[threadIdx.x] = buff_arr[threadIdx.x];
__syncthreads();
// at most reduce 1024 elements per block
reduce_fn(s, 512, fn, buff_arr + blockIdx.x);
}
__global__
void histogram_atomic(const float* const d_logLuminance,
unsigned int* const d_histo,
const size_t numBins,
const float min_logLum,
const float lumRange)
{
int r = threadIdx.x;
int c = blockIdx.x;
int idx_1D = r + c * blockDim.x;
int bin = (d_logLuminance[idx_1D] - min_logLum) / lumRange * numBins;
if (bin >= (int)numBins) bin = (int)numBins - 1; // clamp the maximum luminance into the last bin
atomicAdd(&d_histo[bin], 1);
}
__global__
void block_scan_inclusive(unsigned int* const d_histo)
{
    int idx_1D = threadIdx.x + blockIdx.x * blockDim.x;
    // block scan (Hillis-Steele): read the neighbour's value before any thread
    // overwrites it, then add after a barrier, to avoid a read/write race
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        unsigned int val = 0;
        if (threadIdx.x >= stride)
            val = d_histo[idx_1D - stride];
        __syncthreads();
        if (threadIdx.x >= stride)
            d_histo[idx_1D] += val;
        __syncthreads();
    }
}
__global__
void global_scan_inclusive(unsigned int* const d_histo)
{
int idx_1D = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int block_ofst;
if (threadIdx.x == 0)
{
block_ofst = 0;
for (int i = 0; i < blockIdx.x; i++)
{
block_ofst += d_histo[(i + 1) * blockDim.x - 1];
}
}
__syncthreads();
d_histo[idx_1D] += block_ofst;
}
//Run: ./<exe_file> memorial_raw_large.png
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// step 1
float* d_reduce_min;
float* d_reduce_max;
checkCudaErrors(cudaMalloc(&d_reduce_min, numCols * sizeof(float)));
checkCudaErrors(cudaMalloc(&d_reduce_max, numCols * sizeof(float)));
// reduce implementation 1
block_reduce_fn << <numCols, numRows, numRows * sizeof(float) >> > (d_logLuminance, d_reduce_min, 0);
block_reduce_fn << <1, numCols, numCols * sizeof(float) >> > (d_reduce_min, d_reduce_min, 0);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaMemcpy(&min_logLum, d_reduce_min, sizeof(float), cudaMemcpyDeviceToHost);
// reduce implementation 2
reduce_fn_outer << <1, numRows, numRows * sizeof(float) >> > (d_logLuminance, d_reduce_max, numCols, 1);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaMemcpy(&max_logLum, d_reduce_max, sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaFree(d_reduce_min));
checkCudaErrors(cudaFree(d_reduce_max));
// step 2, 3
float lumRange = max_logLum - min_logLum;
unsigned int* d_histo;
checkCudaErrors(cudaMalloc(&d_histo, numBins * sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_histo, 0, numBins * sizeof(unsigned int)));
histogram_atomic << <numCols, numRows >> > (d_logLuminance, d_histo, numBins, min_logLum, lumRange);
// step 4
block_scan_inclusive << <4, numBins / 4 >> > (d_histo);
global_scan_inclusive << <4, numBins / 4 >> > (d_histo);
checkCudaErrors(cudaMemset(d_cdf, 0, sizeof(unsigned int))); // exclusive scan: first CDF entry is 0
checkCudaErrors(cudaMemcpy(&d_cdf[1], d_histo, (numBins - 1) * sizeof(unsigned int), cudaMemcpyDeviceToDevice));
/*
unsigned int* h_tmp = new unsigned int[numBins];
checkCudaErrors(cudaMemcpy(h_tmp, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost));
for (int i = 0; i < numBins; i++)
{
printf("%u ", h_tmp[i]);
}
delete[] h_tmp;
*/
checkCudaErrors(cudaFree(d_histo));
}
|
c75d7ba2b4147b3372960cd8e75ee9f8a3006e5f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling
* approach. It has been written for clarity of exposition to illustrate various
* CUDA programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication. See also: V. Volkov and
* J. Demmel, "Benchmarking GPUs to tune dense linear algebra," in Proc. 2008
* ACM/IEEE Conf. on Supercomputing (SC '08), Piscataway, NJ: IEEE Press, 2008,
* pp. Art. 31:1-11.
*/
// System includes
#include <assert.h>
#include <stdio.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE>
__global__ void MatrixMulCUDA(float *C, float *A, float *B, int wA, int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
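// --- Illustrative host reference (added for clarity; not part of the original sample) ---
// A naive triple-loop multiply computing the same C = A * B as the tiled kernel above,
// useful only as a correctness reference for small sizes. MatrixMulHost is a
// hypothetical helper name introduced here for illustration.
static void MatrixMulHost(float *C, const float *A, const float *B,
                          int hA, int wA, int wB) {
  for (int row = 0; row < hA; ++row) {
    for (int col = 0; col < wB; ++col) {
      float acc = 0.0f;
      for (int k = 0; k < wA; ++k) {
        acc += A[row * wA + k] * B[k * wB + col];  // row of A dot column of B
      }
      C[row * wB + col] = acc;
    }
  }
}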
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv, int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A;
checkCudaErrors(hipHostMalloc(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B;
checkCudaErrors(hipHostMalloc(&h_B, mem_size_B));
hipStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C;
checkCudaErrors(hipHostMalloc(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(
hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(
hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>)
, dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>)
, dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>)
, dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>)
, dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops =
(flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,"
" WorkgroupSize= %u threads/block\n",
gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(
hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i,
h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
checkCudaErrors(hipHostFree(h_A));
checkCudaErrors(hipHostFree(h_B));
checkCudaErrors(hipHostFree(h_C));
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
printf(
"\nNOTE: The CUDA Samples are not meant for performance"
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(
" Note: Outer matrix dimensions of A & B matrices"
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x,
dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| c75d7ba2b4147b3372960cd8e75ee9f8a3006e5f.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling
* approach. It has been written for clarity of exposition to illustrate various
* CUDA programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication. See also: V. Volkov and
* J. Demmel, "Benchmarking GPUs to tune dense linear algebra," in Proc. 2008
* ACM/IEEE Conf. on Supercomputing (SC '08), Piscataway, NJ: IEEE Press, 2008,
* pp. Art. 31:1-11.
*/
// System includes
#include <assert.h>
#include <stdio.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE>
__global__ void MatrixMulCUDA(float *C, float *A, float *B, int wA, int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv, int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A;
checkCudaErrors(cudaMallocHost(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B;
checkCudaErrors(cudaMallocHost(&h_B, mem_size_B));
cudaStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C;
checkCudaErrors(cudaMallocHost(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(
cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(
cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16>
<<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32>
<<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16>
<<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32>
<<<grid, threads, 0, stream>>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops =
(flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,"
" WorkgroupSize= %u threads/block\n",
gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(
cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i,
h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
checkCudaErrors(cudaFreeHost(h_A));
checkCudaErrors(cudaFreeHost(h_B));
checkCudaErrors(cudaFreeHost(h_C));
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
printf(
"\nNOTE: The CUDA Samples are not meant for performance"
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(
" Note: Outer matrix dimensions of A & B matrices"
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x,
dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
7a15ffef6c1873c6f59bea61c927ae7b7826cf12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <climits>
#include <cfloat>
const int BLOCK_SIZE = 1024;
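// A minimal serial reference for the four steps described in the comment block
// above, handy for checking the GPU path on small inputs. The helper name and
// signature are illustrative only and are not part of the assignment interface.
static void reference_cdf(const float* h_lum, unsigned int* h_cdf,
                          int numPixels, int numBins)
{
	// step 1: min / max of the (log) luminance values
	float mn = h_lum[0], mx = h_lum[0];
	for (int i = 1; i < numPixels; ++i) {
		if (h_lum[i] < mn) mn = h_lum[i];
		if (h_lum[i] > mx) mx = h_lum[i];
	}
	// step 2: range
	float range = mx - mn;
	// step 3: histogram of the values, reusing h_cdf as scratch for the counts
	for (int b = 0; b < numBins; ++b) h_cdf[b] = 0;
	for (int i = 0; i < numPixels; ++i) {
		int bin = (int)(((h_lum[i] - mn) / range) * numBins);
		if (bin >= numBins) bin = numBins - 1;
		++h_cdf[bin];
	}
	// step 4: exclusive scan of the histogram, in place
	unsigned int acc = 0;
	for (int b = 0; b < numBins; ++b) {
		unsigned int count = h_cdf[b];
		h_cdf[b] = acc;
		acc += count;
	}
}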
__global__ void min_kernel(float * d_out, const float *d_in, int size)
{
extern __shared__ float s_data[]; //is located in kernel call -> 3rd element needed with size of bytes
int Id = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
	s_data[tid] = (Id < size) ? d_in[Id] : FLT_MAX; //load to shared memory, pad out-of-range threads
	__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>=1)
{
		if(tid<s)
		{
			s_data[tid] = s_data[tid+s] < s_data[tid] ? s_data[tid+s] : s_data[tid]; //setmin
		}
		__syncthreads();
}
if(tid==0)//only thread 0 can write to output array
{
d_out[blockIdx.x] = s_data[0];
}
}
__global__ void max_kernel(float * d_out, const float *d_in, int size)
{
extern __shared__ float s_data[]; //is located in kernel call -> 3rd element needed with size of bytes
int Id = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
	s_data[tid] = (Id < size) ? d_in[Id] : -FLT_MAX; //load to shared memory, pad out-of-range threads
	__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>=1)
{
if(tid<s)
{
			s_data[tid] = s_data[tid+s] > s_data[tid] ? s_data[tid+s] : s_data[tid]; //setmax
		}
		__syncthreads();
}
if(tid==0)//only thread 0 can write to output array
{
d_out[blockIdx.x] = s_data[0];
}
}
__global__ void histo_atomic(unsigned int *out_histo,const float *d_in, int num_bins, int size,float min_val,float range)
{
int tid = threadIdx.x;
	int id = tid + blockIdx.x * blockDim.x;
	if(id >= size)
{
return;
}
int bin = ((d_in[id]-min_val)*num_bins)/range;
bin = bin == num_bins ? num_bins -1 : bin; //max value bin is last bin of the histogram
atomicAdd(&(out_histo[bin]),1);
}
__global__ void scan_hillis_steele(unsigned int *d_out,unsigned int *d_in, unsigned int size)
{
extern __shared__ unsigned int temp[];
int tid = threadIdx.x;
int i_0 = 0;
int i_1 = 1;
if(tid>0)
{
temp[tid] = d_in[tid-1]; //exclusive
}
else
{
temp[tid] = 0;
}
	__syncthreads();
for(int j = 1; j < size; j <<=1)
{
i_0 = 1 - i_0;
i_1 = 1 - i_1;
if(tid>=j)
{
temp[size*i_0+tid] = temp[size*i_1+tid] + temp[size*i_1+tid-j];
}
else
{
			temp[size*i_0 + tid] = temp[size*i_1 + tid];
}
		__syncthreads();
}
d_out[tid] = temp[i_0*size+tid];
}
float reduce_min(const float* const d_logLuminance, int input_size)
{
int threads = BLOCK_SIZE;
float *d_cur = NULL;
int size = input_size;
int blocks = ceil(1.0*size/threads);
while(true)
{
float *d_out; //intermediate results
checkCudaErrors(hipMalloc(&d_out,blocks*sizeof(float)));
if(d_cur==NULL)
{
hipLaunchKernelGGL(( min_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_out,d_logLuminance,size);
}
else
{
hipLaunchKernelGGL(( min_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_out,d_cur,size);
}
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//free last intermediate result
		if (d_cur != NULL) checkCudaErrors(hipFree(d_cur));
if(blocks==1)
{
float h_output;
			checkCudaErrors(hipMemcpy(&h_output,d_out,sizeof(float),hipMemcpyDeviceToHost));
			checkCudaErrors(hipFree(d_out));
return h_output;
}
size = blocks;
blocks = ceil(1.0f*size/threads);
if(blocks==0)
blocks++;
d_cur = d_out;
}
}
float reduce_max(const float* const d_logLuminance, int input_size)
{
int threads = BLOCK_SIZE;
float *d_cur = NULL;
int size = input_size;
int blocks = ceil(1.0*size/threads);
while(true)
{
float *d_out; //intermediate results
checkCudaErrors(hipMalloc(&d_out,blocks*sizeof(float)));
if(d_cur==NULL)
{
hipLaunchKernelGGL(( max_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_out,d_logLuminance,size);
}
else
{
hipLaunchKernelGGL(( max_kernel), dim3(blocks),dim3(threads),threads*sizeof(float), 0, d_out,d_cur,size);
}
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
//free last intermediate result
		if (d_cur != NULL) checkCudaErrors(hipFree(d_cur));
if(blocks==1)
{
float h_output;
			checkCudaErrors(hipMemcpy(&h_output,d_out,sizeof(float),hipMemcpyDeviceToHost));
			checkCudaErrors(hipFree(d_out));
return h_output;
}
size = blocks;
blocks = ceil(1.0f*size/threads);
if(blocks==0)
blocks++;
d_cur = d_out;
}
}
unsigned int* compute_histogram(const float* const d_logLuminance, int numBins, int input_size, float minVal, float rangeVals)
{
int threads = BLOCK_SIZE;
unsigned int* d_histo;
checkCudaErrors(hipMalloc(&d_histo, numBins * sizeof(unsigned int)));
checkCudaErrors(hipMemset(d_histo, 0, numBins * sizeof(unsigned int)));
int blocks = ceil(1.0f*input_size / threads);
histo_atomic << <blocks, threads >> >(d_histo, d_logLuminance, numBins, input_size, minVal, rangeVals);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
return d_histo;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//1) min and max
int input_size = numRows * numCols;
min_logLum = reduce_min(d_logLuminance,input_size);
max_logLum = reduce_max(d_logLuminance,input_size);
//2) Range
float range = max_logLum - min_logLum;
//3) Histogram Step
	unsigned int *d_histo = compute_histogram(d_logLuminance, numBins, input_size, min_logLum, range);
//4) scan
	hipLaunchKernelGGL(( scan_hillis_steele) , dim3(1),dim3(numBins),2*numBins*sizeof(unsigned int) , 0, d_cdf,d_histo,numBins);
	checkCudaErrors(hipFree(d_histo));
}
| 7a15ffef6c1873c6f59bea61c927ae7b7826cf12.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <climits>
#include <cfloat>
const int BLOCK_SIZE = 1024;
__global__ void min_kernel(float * d_out, const float *d_in, int size)
{
extern __shared__ float s_data[]; //is located in kernel call -> 3rd element needed with size of bytes
int Id = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
	s_data[tid] = (Id < size) ? d_in[Id] : FLT_MAX; //load to shared memory, pad out-of-range threads
	__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>=1)
{
		if(tid<s)
		{
			s_data[tid] = s_data[tid+s] < s_data[tid] ? s_data[tid+s] : s_data[tid]; //setmin
		}
		__syncthreads();
}
if(tid==0)//only thread 0 can write to output array
{
d_out[blockIdx.x] = s_data[0];
}
}
__global__ void max_kernel(float * d_out, const float *d_in, int size)
{
extern __shared__ float s_data[]; //is located in kernel call -> 3rd element needed with size of bytes
int Id = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
	s_data[tid] = (Id < size) ? d_in[Id] : -FLT_MAX; //load to shared memory, pad out-of-range threads
	__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>=1)
{
if(tid<s)
{
			s_data[tid] = s_data[tid+s] > s_data[tid] ? s_data[tid+s] : s_data[tid]; //setmax
		}
		__syncthreads();
}
if(tid==0)//only thread 0 can write to output array
{
d_out[blockIdx.x] = s_data[0];
}
}
__global__ void histo_atomic(unsigned int *out_histo,const float *d_in, int num_bins, int size,float min_val,float range)
{
int tid = threadIdx.x;
	int id = tid + blockIdx.x * blockDim.x;
	if(id >= size)
{
return;
}
int bin = ((d_in[id]-min_val)*num_bins)/range;
bin = bin == num_bins ? num_bins -1 : bin; //max value bin is last bin of the histogram
atomicAdd(&(out_histo[bin]),1);
}
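// Worked example with the sample data from the header comment: a value of 5 with
// min 0, range 9 and 3 bins gives bin = ((5 - 0) * 3) / 9 = 1.67, truncated to 1;
// the maximum value 9 would give bin = 3, which the clamp above maps to bin 2.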
__global__ void scan_hillis_steele(unsigned int *d_out,unsigned int *d_in, unsigned int size)
{
extern __shared__ unsigned int temp[];
int tid = threadIdx.x;
int i_0 = 0;
int i_1 = 1;
if(tid>0)
{
temp[tid] = d_in[tid-1]; //exclusive
}
else
{
temp[tid] = 0;
}
	__syncthreads();
for(int j = 1; j < size; j <<=1)
{
i_0 = 1 - i_0;
i_1 = 1 - i_1;
if(tid>=j)
{
temp[size*i_0+tid] = temp[size*i_1+tid] + temp[size*i_1+tid-j];
}
else
{
			temp[size*i_0 + tid] = temp[size*i_1 + tid];
}
		__syncthreads();
}
d_out[tid] = temp[i_0*size+tid];
}
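// Trace on the sample histogram [4 7 3] from the header comment: the shift-by-one
// load gives [0 4 7], and the doubling passes produce the exclusive scan [0 4 11];
// adding each bin's own count back in recovers the inclusive cdf [4 11 14] quoted
// in the example.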
float reduce_min(const float* const d_logLuminance, int input_size)
{
int threads = BLOCK_SIZE;
float *d_cur = NULL;
int size = input_size;
int blocks = ceil(1.0*size/threads);
while(true)
{
float *d_out; //intermediate results
checkCudaErrors(cudaMalloc(&d_out,blocks*sizeof(float)));
if(d_cur==NULL)
{
min_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_logLuminance,size);
}
else
{
min_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_cur,size);
}
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
//free last intermediate result
		if (d_cur != NULL) checkCudaErrors(cudaFree(d_cur));
if(blocks==1)
{
float h_output;
			checkCudaErrors(cudaMemcpy(&h_output,d_out,sizeof(float),cudaMemcpyDeviceToHost));
			checkCudaErrors(cudaFree(d_out));
return h_output;
}
size = blocks;
blocks = ceil(1.0f*size/threads);
if(blocks==0)
blocks++;
d_cur = d_out;
}
}
float reduce_max(const float* const d_logLuminance, int input_size)
{
int threads = BLOCK_SIZE;
float *d_cur = NULL;
int size = input_size;
int blocks = ceil(1.0*size/threads);
while(true)
{
float *d_out; //intermediate results
checkCudaErrors(cudaMalloc(&d_out,blocks*sizeof(float)));
if(d_cur==NULL)
{
max_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_logLuminance,size);
}
else
{
max_kernel<<<blocks,threads,threads*sizeof(float)>>>(d_out,d_cur,size);
}
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
//free last intermediate result
		if (d_cur != NULL) checkCudaErrors(cudaFree(d_cur));
if(blocks==1)
{
float h_output;
			checkCudaErrors(cudaMemcpy(&h_output,d_out,sizeof(float),cudaMemcpyDeviceToHost));
			checkCudaErrors(cudaFree(d_out));
return h_output;
}
size = blocks;
blocks = ceil(1.0f*size/threads);
if(blocks==0)
blocks++;
d_cur = d_out;
}
}
unsigned int* compute_histogram(const float* const d_logLuminance, int numBins, int input_size, float minVal, float rangeVals)
{
int threads = BLOCK_SIZE;
unsigned int* d_histo;
checkCudaErrors(cudaMalloc(&d_histo, numBins * sizeof(unsigned int)));
checkCudaErrors(cudaMemset(d_histo, 0, numBins * sizeof(unsigned int)));
int blocks = ceil(1.0f*input_size / threads);
histo_atomic << <blocks, threads >> >(d_histo, d_logLuminance, numBins, input_size, minVal, rangeVals);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
return d_histo;
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//1) min and max
int input_size = numRows * numCols;
min_logLum = reduce_min(d_logLuminance,input_size);
max_logLum = reduce_max(d_logLuminance,input_size);
//2) Range
float range = max_logLum - min_logLum;
//3) Histogram Step
	unsigned int *d_histo = compute_histogram(d_logLuminance, numBins, input_size, min_logLum, range);
//4) scan
	scan_hillis_steele <<<1,numBins,2*numBins*sizeof(unsigned int) >>>(d_cdf,d_histo,numBins);
	checkCudaErrors(cudaFree(d_histo));
}
|
4de34d7e8e4f74f742a501b0c1f806f81350da88.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
/*
Inspired by the implementation of CustomPong.cu
@author jv
*/
extern "C"
{
/*
Draws entire map
inputWidth & inputHeight: map dimensions in pixels
talesWidth & height: no of tales
*/
__global__ void DrawTalesKernel(float *input, int inputWidth, int inputHeight,
int* tiles, int tilesWidth, int tilesHeight,
float *sprite, float *obstacleSprite, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int tilesSize = tilesWidth * tilesHeight;
int spriteId = id % size;
int px = spriteId % spriteSize.x;
int py = spriteId / spriteSize.x;
int tileId = id / size;
// position of my tale
int by = tileId / tilesWidth;
int bx = tileId % tilesWidth;
// original one, not upside down
//int inputOffset = ((int)by * spriteSize.y + py) * inputWidth + bx * spriteSize.x + px;
int inputOffset = ((tilesHeight-1-by) * spriteSize.y + py) * inputWidth
+ bx * spriteSize.x + px;
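		// (tilesHeight-1-by) mirrors the tile row index so tile row 0 lands in the last
		// rows of the image buffer rather than the first ones (the "upside down" layout).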
if (id < inputSize && inputOffset >= 0 && inputOffset < inputSize)
{
// obstacles are marked as 1
if(tiles[tileId] == 1)
{
input[inputOffset] = obstacleSprite[spriteId];
}
// everything else will be drawn as free and you can place anything over it
else
{
input[inputOffset] = sprite[spriteId];
}
}
}
/*
position: in tale coordinates
resolution: tale size in pixels
inputWidth: width of the visible area
*/
__global__ void DrawObjectKernel(float *input, int resolution, int inputWidth, int inputHeight,
float *sprite, int2 position, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int px = id % spriteSize.x;
int py = id / spriteSize.x;
// where to draw a pixel in the visual array
//int inputOffset = (position.y*resolution+ py) * inputWidth + (resolution*position.x + px);
// upside down version
int talesHeight = inputHeight/resolution;
int inputOffset = ((talesHeight-1-position.y) * resolution + py) * inputWidth
+ resolution*position.x + px;
if (id < size && inputOffset >= 0 && inputOffset < inputSize && sprite[id] < 1.0f)
{
input[inputOffset] = sprite[id];
}
}
__global__ void DrawFreeObjectKernel(float *input, int inputWidth, int inputHeight,
float *sprite, int2 position, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int px = id % spriteSize.x;
int py = id / spriteSize.x;
int inputOffset = (inputHeight - 1 - position.y + py) * inputWidth + position.x + px;
if (id < size && inputOffset >= 0 && inputOffset < inputSize && sprite[id] < 1.0f)
{
input[inputOffset] = sprite[id];
}
}
}
| 4de34d7e8e4f74f742a501b0c1f806f81350da88.cu | #include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
/*
Inspired by the implementation of CustomPong.cu
@author jv
*/
extern "C"
{
/*
Draws entire map
inputWidth & inputHeight: map dimensions in pixels
talesWidth & height: no of tales
*/
__global__ void DrawTalesKernel(float *input, int inputWidth, int inputHeight,
int* tiles, int tilesWidth, int tilesHeight,
float *sprite, float *obstacleSprite, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int tilesSize = tilesWidth * tilesHeight;
int spriteId = id % size;
int px = spriteId % spriteSize.x;
int py = spriteId / spriteSize.x;
int tileId = id / size;
// position of my tale
int by = tileId / tilesWidth;
int bx = tileId % tilesWidth;
// original one, not upside down
//int inputOffset = ((int)by * spriteSize.y + py) * inputWidth + bx * spriteSize.x + px;
int inputOffset = ((tilesHeight-1-by) * spriteSize.y + py) * inputWidth
+ bx * spriteSize.x + px;
if (id < inputSize && inputOffset >= 0 && inputOffset < inputSize)
{
// obstacles are marked as 1
if(tiles[tileId] == 1)
{
input[inputOffset] = obstacleSprite[spriteId];
}
// everything else will be drawn as free and you can place anything over it
else
{
input[inputOffset] = sprite[spriteId];
}
}
}
/*
position: in tale coordinates
resolution: tale size in pixels
inputWidth: width of the visible area
*/
__global__ void DrawObjectKernel(float *input, int resolution, int inputWidth, int inputHeight,
float *sprite, int2 position, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int px = id % spriteSize.x;
int py = id / spriteSize.x;
// where to draw a pixel in the visual array
//int inputOffset = (position.y*resolution+ py) * inputWidth + (resolution*position.x + px);
// upside down version
int talesHeight = inputHeight/resolution;
int inputOffset = ((talesHeight-1-position.y) * resolution + py) * inputWidth
+ resolution*position.x + px;
if (id < size && inputOffset >= 0 && inputOffset < inputSize && sprite[id] < 1.0f)
{
input[inputOffset] = sprite[id];
}
}
__global__ void DrawFreeObjectKernel(float *input, int inputWidth, int inputHeight,
float *sprite, int2 position, int2 spriteSize)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int inputSize = inputWidth * inputHeight;
int size = spriteSize.x * spriteSize.y;
int px = id % spriteSize.x;
int py = id / spriteSize.x;
int inputOffset = (inputHeight - 1 - position.y + py) * inputWidth + position.x + px;
if (id < size && inputOffset >= 0 && inputOffset < inputSize && sprite[id] < 1.0f)
{
input[inputOffset] = sprite[id];
}
}
}
|
b965a1aa0a191e935f074477f9f1f5c8253ae65d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
using namespace std;
__global__ void add(int *a, int *b, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index<n){
a[index] += b[index];
}
}
__global__ void rad(int *a, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index<n){
a[index] = 1;
}
}
int main(){
int N = 10000;
int M = 512;
int *a, *b;
int *d_a, *d_b;
int size = N * sizeof(int);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
	// kernels expect the element count N, not the byte count
	hipLaunchKernelGGL(( rad), dim3((N+M-1)/M), dim3(M), 0, 0, d_a, N);
	hipLaunchKernelGGL(( rad), dim3((N+M-1)/M), dim3(M), 0, 0, d_b, N);
	hipLaunchKernelGGL(( add), dim3((N+M-1)/M), dim3(M), 0, 0, d_a, d_b, N);
hipMemcpy(a, d_a, size, hipMemcpyDeviceToHost);
hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost);
int ret = 0;
for(int i=0; i<N; i++)
ret += a[i];
cout << ret << endl;
free(a); free(b);
hipFree(d_a); hipFree(d_b);
return 0;
}
| b965a1aa0a191e935f074477f9f1f5c8253ae65d.cu | #include <iostream>
#include <cstdlib>
using namespace std;
__global__ void add(int *a, int *b, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index<n){
a[index] += b[index];
}
}
__global__ void rad(int *a, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index<n){
a[index] = 1;
}
}
int main(){
int N = 10000;
int M = 512;
int *a, *b;
int *d_a, *d_b;
int size = N * sizeof(int);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	// kernels expect the element count N, not the byte count
	rad<<<(N+M-1)/M, M>>>(d_a, N);
	rad<<<(N+M-1)/M, M>>>(d_b, N);
	add<<<(N+M-1)/M, M>>>(d_a, d_b, N);
cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
int ret = 0;
for(int i=0; i<N; i++)
ret += a[i];
cout << ret << endl;
free(a); free(b);
cudaFree(d_a); cudaFree(d_b);
return 0;
}
|
cdb57b37b0c3ba6ae28c8680dbf8063765bb56b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <random>
#include <vector>
#include "profiler.h"
#include <algorithm>
#include <functional>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
int main(void)
{
hipSetDevice(1);
const size_t n = 1 << 27;
profiler prof;
std::vector<float> a(n);
std::uniform_real_distribution<float> distribution(0.0, 1.0);
std::mt19937 engine;
auto generator = std::bind(distribution, engine);
std::generate_n(a.begin(), n, generator);
prof.tic("sort cpu");
std::sort(a.begin(), a.end());
prof.toc("sort cpu");
prof.tic("thrust data transfer");
thrust::device_vector<float> a_dev(a.begin(), a.end());
prof.toc("thrust data transfer");
prof.tic("sort thrust");
    thrust::sort(a_dev.begin(), a_dev.end());
prof.toc("sort thrust");
prof.report();
return 0;
}
| cdb57b37b0c3ba6ae28c8680dbf8063765bb56b9.cu | #include <iostream>
#include <random>
#include <vector>
#include "profiler.h"
#include <algorithm>
#include <functional>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
int main(void)
{
cudaSetDevice(1);
const size_t n = 1 << 27;
profiler prof;
std::vector<float> a(n);
std::uniform_real_distribution<float> distribution(0.0, 1.0);
std::mt19937 engine;
auto generator = std::bind(distribution, engine);
std::generate_n(a.begin(), n, generator);
prof.tic("sort cpu");
std::sort(a.begin(), a.end());
prof.toc("sort cpu");
prof.tic("thrust data transfer");
thrust::device_vector<float> a_dev(a.begin(), a.end());
prof.toc("thrust data transfer");
prof.tic("sort thrust");
    thrust::sort(a_dev.begin(), a_dev.end());
prof.toc("sort thrust");
prof.report();
return 0;
}
|
8b25d68f5882319f7cb4fc8df68da699b76cee11.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
const int width = 5;
void initiateMatrix(float *matrixM, float *matrixN){
int val = 0;
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
matrixM[i*width+j] = val;
matrixN[i*width+j] = val;
val++;
}
}
}
__global__ void MatrixAddKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
P[ty*width + tx] = M[ty*width + tx] + N[ty*width + tx];
}
__global__ void MatrixSubKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
P[ty*width + tx] = M[ty*width + tx] - N[ty*width + tx];
}
__global__ void MatrixMulKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
float Pvalue = 0;
for(int i = 0; i < width; i++){
Pvalue += M[ty*width + i]*N[i*width + tx];
}
P[ty*width + tx] = Pvalue;
}
//single-threaded CPU version of mat_add, mat_sub and mat_mult
void MatrixAdd(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
P[i*width + j] = M[i*width + j] + N[i*width + j];
}
}
}
void MatrixSub(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
P[i*width + j] = M[i*width + j] - N[i*width + j];
}
}
}
void MatrixMul(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
float Pvalue = 0;
for(int k = 0; k < width; k++){
Pvalue += M[i*width + k] * N[k*width + j];
}
P[i*width + j] = Pvalue;
}
}
}
void printResult(float* add, float* sub, float* mul){
	const char* labels[3] = { "add:", "sub:", "multi:" };
	float* results[3] = { add, sub, mul };
	for (int m = 0; m < 3; ++m) {
		std::cout << labels[m] << "\n";
		for (int i = 0; i < width; ++i) {
			for (int j = 0; j < width; ++j) {
				std::cout << results[m][i*width + j] << (j + 1 < width ? " " : "\n");
			}
		}
		std::cout << "\n";
	}
}
int main(){
float *matrixM = new float[width*width];
float *matrixN = new float[width*width];
initiateMatrix(matrixM, matrixN);
int size = width*width*sizeof(float);
float *Md, *Nd, *Pd_add, *Pd_sub, *Pd_mul, *P_add, *P_sub, *P_mul;
P_add = new float[width*width];
P_sub = new float[width*width];
P_mul = new float[width*width];
hipMalloc((void**)&Md,size);
hipMemcpy(Md, matrixM, size, hipMemcpyHostToDevice);
hipMalloc((void**)&Nd,size);
hipMemcpy(Nd, matrixN, size, hipMemcpyHostToDevice);
hipMalloc((void**)&Pd_add, size);
hipMalloc((void**)&Pd_sub, size);
hipMalloc((void**)&Pd_mul, size);
dim3 dimBlock(width, width);
dim3 dimGrid(1, 1);
hipLaunchKernelGGL(( MatrixAddKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd_add);
hipMemcpy(P_add, Pd_add, size, hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( MatrixSubKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd_sub);
hipMemcpy(P_sub, Pd_sub, size, hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd_mul);
hipMemcpy(P_mul, Pd_mul, size, hipMemcpyDeviceToHost);
hipFree(Md);
hipFree(Nd);
hipFree(Pd_add);
hipFree(Pd_sub);
hipFree(Pd_mul);
std::cout<<"cuda result: \n";
printResult(P_add, P_sub, P_mul);
MatrixAdd(matrixM, matrixN, P_add);
MatrixSub(matrixM, matrixN, P_sub);
MatrixMul(matrixM, matrixN, P_mul);
std::cout<<"single-threaded CPU result: \n";
printResult(P_add, P_sub, P_mul);
delete[] matrixM;
delete[] matrixN;
delete[] P_add;
delete[] P_sub;
delete[] P_mul;
} | 8b25d68f5882319f7cb4fc8df68da699b76cee11.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
const int width = 5;
void initiateMatrix(float *matrixM, float *matrixN){
int val = 0;
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
matrixM[i*width+j] = val;
matrixN[i*width+j] = val;
val++;
}
}
}
__global__ void MatrixAddKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
P[ty*width + tx] = M[ty*width + tx] + N[ty*width + tx];
}
__global__ void MatrixSubKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
P[ty*width + tx] = M[ty*width + tx] - N[ty*width + tx];
}
__global__ void MatrixMulKernel(float* M, float* N, float* P){
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
float Pvalue = 0;
for(int i = 0; i < width; i++){
Pvalue += M[ty*width + i]*N[i*width + tx];
}
P[ty*width + tx] = Pvalue;
}
//single-threaded CPU version of mat_add, mat_sub and mat_mult
void MatrixAdd(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
P[i*width + j] = M[i*width + j] + N[i*width + j];
}
}
}
void MatrixSub(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
P[i*width + j] = M[i*width + j] - N[i*width + j];
}
}
}
void MatrixMul(float* M, float* N, float* P){
for(int i = 0; i < width; i++){
for(int j = 0; j < width; j++){
float Pvalue = 0;
for(int k = 0; k < width; k++){
Pvalue += M[i*width + k] * N[k*width + j];
}
P[i*width + j] = Pvalue;
}
}
}
void printResult(float* add, float* sub, float* mul){
	const char* labels[3] = { "add:", "sub:", "multi:" };
	float* results[3] = { add, sub, mul };
	for (int m = 0; m < 3; ++m) {
		std::cout << labels[m] << "\n";
		for (int i = 0; i < width; ++i) {
			for (int j = 0; j < width; ++j) {
				std::cout << results[m][i*width + j] << (j + 1 < width ? " " : "\n");
			}
		}
		std::cout << "\n";
	}
}
int main(){
float *matrixM = new float[width*width];
float *matrixN = new float[width*width];
initiateMatrix(matrixM, matrixN);
int size = width*width*sizeof(float);
float *Md, *Nd, *Pd_add, *Pd_sub, *Pd_mul, *P_add, *P_sub, *P_mul;
P_add = new float[width*width];
P_sub = new float[width*width];
P_mul = new float[width*width];
cudaMalloc((void**)&Md,size);
cudaMemcpy(Md, matrixM, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd,size);
cudaMemcpy(Nd, matrixN, size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&Pd_add, size);
cudaMalloc((void**)&Pd_sub, size);
cudaMalloc((void**)&Pd_mul, size);
dim3 dimBlock(width, width);
dim3 dimGrid(1, 1);
MatrixAddKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd_add);
cudaMemcpy(P_add, Pd_add, size, cudaMemcpyDeviceToHost);
MatrixSubKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd_sub);
cudaMemcpy(P_sub, Pd_sub, size, cudaMemcpyDeviceToHost);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd_mul);
cudaMemcpy(P_mul, Pd_mul, size, cudaMemcpyDeviceToHost);
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd_add);
cudaFree(Pd_sub);
cudaFree(Pd_mul);
std::cout<<"cuda result: \n";
printResult(P_add, P_sub, P_mul);
MatrixAdd(matrixM, matrixN, P_add);
MatrixSub(matrixM, matrixN, P_sub);
MatrixMul(matrixM, matrixN, P_mul);
std::cout<<"single-threaded CPU result: \n";
printResult(P_add, P_sub, P_mul);
delete[] matrixM;
delete[] matrixN;
delete[] P_add;
delete[] P_sub;
delete[] P_mul;
} |
2752e3dcfac27c9329ba063ea670c82d85c1f244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#define WORK_WIDTH 9
#define WORK_HEIGHT 1
#define BLOCK_WIDTH 3
#define BLOCK_HEIGHT 1
#define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 dimGrid(ceil(WORK_WIDTH / (float) dimBlock.x / 2), ceil(WORK_HEIGHT / (float) dimBlock.y));
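// dimGrid.x covers only half of WORK_WIDTH because each thread swaps one element
// from the front half with its mirror element in the back half.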
template <typename T>
__device__ void swap(T* a, T* b) {
T tmp = *a;
*a = *b;
*b = tmp;
}
template <typename T>
__global__ void inverseArray(T* devRes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < WORK_WIDTH / 2) {
printf("Thread %d swapping pos %d with %d\n", col, col, WORK_WIDTH - 1 - col);
swap(&devRes[col], &devRes[WORK_WIDTH - 1 - col]);
}
}
int main() {
printf("Kernel will be invoked with: Block(%d,%d), Grid(%d,%d)\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
std::array<float, WORK_TOTAL> src = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::array<float, WORK_TOTAL> res;
runWithProfiler([&]() {
CudaBuffer<float> devRes(WORK_TOTAL);
devRes.copyFrom(src);
hipLaunchKernelGGL(( inverseArray<float>) , dim3(dimGrid), dim3(dimBlock), 0, 0, devRes);
devRes.copyTo(res);
});
// Print the results
for (int col = 0; col < WORK_WIDTH; ++col) {
std::cout << res[col] << " ";
}
std::cout << std::endl;
return 0;
}
| 2752e3dcfac27c9329ba063ea670c82d85c1f244.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <array>
#include <iostream>
#include "CudaUtils.h"
#define WORK_WIDTH 9
#define WORK_HEIGHT 1
#define BLOCK_WIDTH 3
#define BLOCK_HEIGHT 1
#define WORK_TOTAL WORK_WIDTH * WORK_HEIGHT
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 dimGrid(ceil(WORK_WIDTH / (float) dimBlock.x / 2), ceil(WORK_HEIGHT / (float) dimBlock.y));
template <typename T>
__device__ void swap(T* a, T* b) {
T tmp = *a;
*a = *b;
*b = tmp;
}
template <typename T>
__global__ void inverseArray(T* devRes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < WORK_WIDTH / 2) {
printf("Thread %d swapping pos %d with %d\n", col, col, WORK_WIDTH - 1 - col);
swap(&devRes[col], &devRes[WORK_WIDTH - 1 - col]);
}
}
int main() {
printf("Kernel will be invoked with: Block(%d,%d), Grid(%d,%d)\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
std::array<float, WORK_TOTAL> src = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::array<float, WORK_TOTAL> res;
runWithProfiler([&]() {
CudaBuffer<float> devRes(WORK_TOTAL);
devRes.copyFrom(src);
inverseArray<float> <<<dimGrid, dimBlock>>> (devRes);
devRes.copyTo(res);
});
// Print the results
for (int col = 0; col < WORK_WIDTH; ++col) {
std::cout << res[col] << " ";
}
std::cout << std::endl;
return 0;
}
|
2fae39e0e249362b49bbe86b84b68f4d903489f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <chrono>
#define TILE_WIDTH 32
typedef int dato;
using namespace std;
using namespace std::chrono;
template<class T>
void printMatrix(T *M, int rows, int cols) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
cout << M[i*cols + j] << '\t';
}
cout << endl;
}
cout << endl;
}
__global__
void matrixMulKernel(dato *M, dato *N, dato *P, int a, int b, int c) {
int col = blockIdx.y*blockDim.y + threadIdx.y;
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row < a && col < c) {
dato Pvalue = 0;
for (int k = 0; k < b; ++k) {
Pvalue += M[row*b + k] * N[k*c + col];
}
P[row*c + col] = Pvalue;
}
}
__global__
void matrixMulKernel2(dato *M, dato *N, dato *P, int a, int b, int c) {
__shared__ dato Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ dato Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x,
by = blockIdx.y,
tx = threadIdx.x,
ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
dato pValue = 0;
for (int ph = 0; ph < ceil(b / (float)TILE_WIDTH); ++ph) {
if (row < a && (ph*TILE_WIDTH + tx) < b)
Mds[ty][tx] = M[row*b + ph * TILE_WIDTH + tx];
else
Mds[ty][tx] = 0;
if (col < c && (ph*TILE_WIDTH + ty) < b)
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*c + col];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
pValue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if(row<a && col<c)
P[row*c + col] = pValue;
}
__global__
void matrixMulKernel3(dato *M, dato *N, dato *P, int a, int b, int c) {
__shared__ dato Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ dato Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x,
by = blockIdx.y,
tx = threadIdx.x,
ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
dato pValue = 0;
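	// NOTE: ph advances by 2 below, so this variant accumulates only the even-numbered
	// tile phases and does not compute the same product as matrixMulKernel2.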
for (int ph = 0; ph < ceil(b / (float)TILE_WIDTH); ph+=2) {
if (row < a && (ph*TILE_WIDTH + tx) < b)
Mds[ty][tx] = M[row*b + ph * TILE_WIDTH + tx];
else
Mds[ty][tx] = 0;
if (col < c && (ph*TILE_WIDTH + ty) < b)
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*c + col];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
pValue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < a && col < c)
P[row*c + col] = pValue;
}
void matrixMul(dato *M, dato *N, dato *P, int a, int b, int c, int mode) {
dato *D_M, *D_N, *D_P;
int matrixSize1 = a * b;
int matrixSize2 = b * c;
int matrixSize3 = a * c;
hipMalloc((void**)&D_M, matrixSize1 * sizeof(dato));
hipMemcpy(D_M, M, matrixSize1 * sizeof(dato), hipMemcpyHostToDevice);
hipMalloc((void**)&D_N, matrixSize2 * sizeof(dato));
hipMemcpy(D_N, N, matrixSize2 * sizeof(dato), hipMemcpyHostToDevice);
hipMalloc((void**)&D_P, matrixSize3 * sizeof(dato));
auto start = high_resolution_clock::now();
switch (mode) {
case 0:
matrixMulKernel << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
break;
case 1:
matrixMulKernel2 << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
break;
case 2:
matrixMulKernel3 << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
}
auto end = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(end - start);
cout <<"Tiempo en microsegundos: "<< duration.count() << endl;
hipMemcpy(P,D_P,matrixSize3*sizeof(dato),hipMemcpyDeviceToHost);
hipFree(D_M);
hipFree(D_N);
hipFree(D_P);
}
int main(){
//srand(time(NULL));
dato *M1, *M2, *M3;
int a=1000, b=1000, c=1000;
M1 = new dato[a*b];
M2 = new dato[b*c];
M3 = new dato[a*c];
for (int i = 0, top = a * b; i < top; ++i) {
M1[i] = rand()%5;
}
for (int i = 0, top = b * c; i < top; ++i) {
M2[i] = rand()%5;
}
//printMatrix(M1, a, b);
//printMatrix(M2, b, c);
matrixMul(M1, M2, M3, a, b, c, 2);
printMatrix(M3, a, c);
//matrixMul(M1, M2, M3, a, b, c, 1);
//printMatrix(M3, a, c);
//matrixMul(M1, M2, M3, a, b, c, 0);
return 0;
} | 2fae39e0e249362b49bbe86b84b68f4d903489f4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include <chrono>
#define TILE_WIDTH 32
typedef int dato;
using namespace std;
using namespace std::chrono;
template<class T>
void printMatrix(T *M, int rows, int cols) {
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
cout << M[i*cols + j] << '\t';
}
cout << endl;
}
cout << endl;
}
__global__
void matrixMulKernel(dato *M, dato *N, dato *P, int a, int b, int c) {
int col = blockIdx.y*blockDim.y + threadIdx.y;
int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row < a && col < c) {
dato Pvalue = 0;
for (int k = 0; k < b; ++k) {
Pvalue += M[row*b + k] * N[k*c + col];
}
P[row*c + col] = Pvalue;
}
}
__global__
void matrixMulKernel2(dato *M, dato *N, dato *P, int a, int b, int c) {
__shared__ dato Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ dato Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x,
by = blockIdx.y,
tx = threadIdx.x,
ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
dato pValue = 0;
for (int ph = 0; ph < ceil(b / (float)TILE_WIDTH); ++ph) {
if (row < a && (ph*TILE_WIDTH + tx) < b)
Mds[ty][tx] = M[row*b + ph * TILE_WIDTH + tx];
else
Mds[ty][tx] = 0;
if (col < c && (ph*TILE_WIDTH + ty) < b)
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*c + col];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
pValue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if(row<a && col<c)
P[row*c + col] = pValue;
}
__global__
void matrixMulKernel3(dato *M, dato *N, dato *P, int a, int b, int c) {
__shared__ dato Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ dato Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x,
by = blockIdx.y,
tx = threadIdx.x,
ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
dato pValue = 0;
for (int ph = 0; ph < ceil(b / (float)TILE_WIDTH); ph+=2) {
if (row < a && (ph*TILE_WIDTH + tx) < b)
Mds[ty][tx] = M[row*b + ph * TILE_WIDTH + tx];
else
Mds[ty][tx] = 0;
if (col < c && (ph*TILE_WIDTH + ty) < b)
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*c + col];
else
Nds[ty][tx] = 0;
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
pValue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if (row < a && col < c)
P[row*c + col] = pValue;
}
void matrixMul(dato *M, dato *N, dato *P, int a, int b, int c, int mode) {
dato *D_M, *D_N, *D_P;
int matrixSize1 = a * b;
int matrixSize2 = b * c;
int matrixSize3 = a * c;
cudaMalloc((void**)&D_M, matrixSize1 * sizeof(dato));
cudaMemcpy(D_M, M, matrixSize1 * sizeof(dato), cudaMemcpyHostToDevice);
cudaMalloc((void**)&D_N, matrixSize2 * sizeof(dato));
cudaMemcpy(D_N, N, matrixSize2 * sizeof(dato), cudaMemcpyHostToDevice);
cudaMalloc((void**)&D_P, matrixSize3 * sizeof(dato));
auto start = high_resolution_clock::now();
switch (mode) {
case 0:
matrixMulKernel << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
break;
case 1:
matrixMulKernel2 << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
break;
case 2:
matrixMulKernel3 << <dim3(ceil(a / 32.0), ceil(c / 32.0), 1), dim3(32, 32, 1) >> > (D_M, D_N, D_P, a, b, c);
}
auto end = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(end - start);
cout <<"Tiempo en microsegundos: "<< duration.count() << endl;
cudaMemcpy(P,D_P,matrixSize3*sizeof(dato),cudaMemcpyDeviceToHost);
cudaFree(D_M);
cudaFree(D_N);
cudaFree(D_P);
}
int main(){
//srand(time(NULL));
dato *M1, *M2, *M3;
int a=1000, b=1000, c=1000;
M1 = new dato[a*b];
M2 = new dato[b*c];
M3 = new dato[a*c];
for (int i = 0, top = a * b; i < top; ++i) {
M1[i] = rand()%5;
}
for (int i = 0, top = b * c; i < top; ++i) {
M2[i] = rand()%5;
}
//printMatrix(M1, a, b);
//printMatrix(M2, b, c);
matrixMul(M1, M2, M3, a, b, c, 2);
printMatrix(M3, a, c);
//matrixMul(M1, M2, M3, a, b, c, 1);
//printMatrix(M3, a, c);
//matrixMul(M1, M2, M3, a, b, c, 0);
return 0;
} |
ca275c647c04b8d1267caae8c3cbbd8b86dfa67a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Shawn Sang-Ha Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
// AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS
#define CUDA_SAFE_CALL( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define THREADS_PER_BLOCK 128
#define MAXBLOCKS 65536
#define CUDATIME
// host memory
float *work_mem_h;
float *coord_h;
// device memory
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
Point *p;
static int iter = 0;			// counter for the total number of iterations
//=======================================
// Euclidean Distance
//=======================================
__device__ float
d_dist(int p1, int p2, int num, int dim, float *coord_d)
{
float retval = 0.0;
for(int i = 0; i < dim; i++){
float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2];
retval += tmp * tmp;
}
return retval;
}
//=======================================
// Kernel - Compute Cost
//=======================================
__global__ void
kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d)
{
// block ID and global thread ID
const int bid = blockIdx.x + gridDim.x * blockIdx.y;
const int tid = blockDim.x * bid + threadIdx.x;
if(tid < num)
{
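		// work_mem layout: each point owns 'stride' = K+1 floats. Slots [0..K-1] (indexed via
		// center_table) accumulate, for the point's current center, the cost difference used on
		// the host to decide whether that center should close; slot [K] accumulates the cost
		// change (negative = savings) for points that would switch to the new center x.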
float *lower = &work_mem_d[tid*stride];
// cost between this point and point[x]: euclidean distance multiplied by weight
float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight;
		// if the computed cost is less than the original (it saves), mark the point for reassignment
if ( x_cost < p[tid].cost )
{
switch_membership_d[tid] = 1;
lower[K] += x_cost - p[tid].cost;
}
// if computed cost is larger, save the difference
else
{
lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost;
}
}
}
//=======================================
// Allocate Device Memory
//=======================================
void allocDevMem(int num, int dim)
{
CUDA_SAFE_CALL( hipMalloc((void**) ¢er_table_d, num * sizeof(int)) );
CUDA_SAFE_CALL( hipMalloc((void**) &switch_membership_d, num * sizeof(bool)) );
CUDA_SAFE_CALL( hipMalloc((void**) &p, num * sizeof(Point)) );
CUDA_SAFE_CALL( hipMalloc((void**) &coord_d, num * dim * sizeof(float)) );
}
//=======================================
// Allocate Host Memory
//=======================================
void allocHostMem(int num, int dim)
{
coord_h = (float*) malloc( num * dim * sizeof(float) );
}
//=======================================
// Free Device Memory
//=======================================
void freeDevMem()
{
CUDA_SAFE_CALL( hipFree(center_table_d) );
CUDA_SAFE_CALL( hipFree(switch_membership_d) );
CUDA_SAFE_CALL( hipFree(p) );
CUDA_SAFE_CALL( hipFree(coord_d) );
}
//=======================================
// Free Host Memory
//=======================================
void freeHostMem()
{
free(coord_h);
}
//=======================================
// pgain Entry - CUDA SETUP + CUDA CALL
//=======================================
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged,
double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t)
{
#ifdef CUDATIME
float tmp_t;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipError_t error;
int stride = *numcenters + 1; // size of each work_mem segment
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
int nThread = num; // number of threads == number of data points
//=========================================
// ALLOCATE HOST MEMORY + DATA PREPARATION
//=========================================
work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) );
// Only on the first iteration
if(iter == 0)
{
allocHostMem(num, dim);
}
// build center-index table
int count = 0;
for( int i=0; i<num; i++)
{
if( is_center[i] )
{
center_table[i] = count++;
}
}
// Extract 'coord'
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
for(int i=0; i<dim; i++)
{
for(int j=0; j<num; j++)
{
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
}
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// ALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( hipMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) );
// Only on the first iteration
if( iter == 0 )
{
allocDevMem(num, dim);
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*alloc_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// CPU-TO-GPU MEMORY COPY
//=======================================
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
CUDA_SAFE_CALL( hipMemcpy(coord_d, coord_h, num * dim * sizeof(float), hipMemcpyHostToDevice) );
}
CUDA_SAFE_CALL( hipMemcpy(center_table_d, center_table, num * sizeof(int), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(p, points->p, num * sizeof(Point), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemset((void*) switch_membership_d, 0, num * sizeof(bool)) );
CUDA_SAFE_CALL( hipMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*cpu_to_gpu_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// KERNEL: CALCULATE COST
//=======================================
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
hipLaunchKernelGGL(( kernel_compute_cost), dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0,
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // in: data point array
K, // in: number of centers
stride, // in: size of each work_mem segment
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
hipDeviceSynchronize();
// error check
error = hipGetLastError();
if (error != hipSuccess)
{
printf("kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*kernel_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// GPU-TO-CPU MEMORY COPY
//=======================================
CUDA_SAFE_CALL( hipMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*gpu_to_cpu_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// CPU (SERIAL) WORK
//=======================================
int number_of_centers_to_close = 0;
float gl_cost_of_opening_x = z;
float *gl_lower = &work_mem_h[stride * nThread];
// compute the number of centers to close if we are to open i
for(int i=0; i < num; i++)
{
if( is_center[i] )
{
float low = z;
for( int j = 0; j < num; j++ )
{
low += work_mem_h[ j*stride + center_table[i] ];
}
gl_lower[center_table[i]] = low;
if ( low > 0 )
{
++number_of_centers_to_close;
work_mem_h[i*stride+K] -= low;
}
}
gl_cost_of_opening_x += work_mem_h[i*stride+K];
}
//if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing
if ( gl_cost_of_opening_x < 0 )
{
for(int i = 0; i < num; i++)
{
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center )
{
points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight;
points->p[i].assign = x;
}
}
for(int i = 0; i < num; i++)
{
if( is_center[i] && gl_lower[center_table[i]] > 0 )
{
is_center[i] = false;
}
}
if( x >= 0 && x < num)
{
is_center[x] = true;
}
*numcenters = *numcenters + 1 - number_of_centers_to_close;
}
else
{
gl_cost_of_opening_x = 0;
}
//=======================================
// DEALLOCATE HOST MEMORY
//=======================================
free(work_mem_h);
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
hipEventRecord(start,0);
#endif
//=======================================
// DEALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( hipFree(work_mem_d) );
#ifdef CUDATIME
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_t, start, stop);
*free_t += (double) tmp_t;
#endif
iter++;
return -gl_cost_of_opening_x;
}
| ca275c647c04b8d1267caae8c3cbbd8b86dfa67a.cu | /***********************************************
streamcluster_cuda.cu
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with CUDA API has been applied by
Shawn Sang-Ha Lee - [email protected]
University of Virginia
Department of Electrical and Computer Engineering
Department of Computer Science
***********************************************/
#include "streamcluster_header.cu"
using namespace std;
// AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS
#define CUDA_SAFE_CALL( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define THREADS_PER_BLOCK 128
#define MAXBLOCKS 65536
#define CUDATIME
// host memory
float *work_mem_h;
float *coord_h;
// device memory
float *work_mem_d;
float *coord_d;
int *center_table_d;
bool *switch_membership_d;
Point *p;
static int iter = 0;			// counter for the total number of iterations
//=======================================
// Euclidean Distance
//=======================================
__device__ float
d_dist(int p1, int p2, int num, int dim, float *coord_d)
{
float retval = 0.0;
for(int i = 0; i < dim; i++){
float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2];
retval += tmp * tmp;
}
return retval;
}
//=======================================
// Kernel - Compute Cost
//=======================================
__global__ void
kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d)
{
// block ID and global thread ID
const int bid = blockIdx.x + gridDim.x * blockIdx.y;
const int tid = blockDim.x * bid + threadIdx.x;
if(tid < num)
{
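		// work_mem layout: each point owns 'stride' = K+1 floats. Slots [0..K-1] (indexed via
		// center_table) accumulate, for the point's current center, the cost difference used on
		// the host to decide whether that center should close; slot [K] accumulates the cost
		// change (negative = savings) for points that would switch to the new center x.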
float *lower = &work_mem_d[tid*stride];
// cost between this point and point[x]: euclidean distance multiplied by weight
float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight;
		// if the computed cost is less than the original (it saves), mark the point for reassignment
if ( x_cost < p[tid].cost )
{
switch_membership_d[tid] = 1;
lower[K] += x_cost - p[tid].cost;
}
// if computed cost is larger, save the difference
else
{
lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost;
}
}
}
//=======================================
// Allocate Device Memory
//=======================================
void allocDevMem(int num, int dim)
{
CUDA_SAFE_CALL( cudaMalloc((void**) ¢er_table_d, num * sizeof(int)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &switch_membership_d, num * sizeof(bool)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &p, num * sizeof(Point)) );
CUDA_SAFE_CALL( cudaMalloc((void**) &coord_d, num * dim * sizeof(float)) );
}
//=======================================
// Allocate Host Memory
//=======================================
void allocHostMem(int num, int dim)
{
coord_h = (float*) malloc( num * dim * sizeof(float) );
}
//=======================================
// Free Device Memory
//=======================================
void freeDevMem()
{
CUDA_SAFE_CALL( cudaFree(center_table_d) );
CUDA_SAFE_CALL( cudaFree(switch_membership_d) );
CUDA_SAFE_CALL( cudaFree(p) );
CUDA_SAFE_CALL( cudaFree(coord_d) );
}
//=======================================
// Free Host Memory
//=======================================
void freeHostMem()
{
free(coord_h);
}
//=======================================
// pgain Entry - CUDA SETUP + CUDA CALL
//=======================================
float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged,
double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t)
{
#ifdef CUDATIME
float tmp_t;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
cudaError_t error;
int stride = *numcenters + 1; // size of each work_mem segment
int K = *numcenters ; // number of centers
int num = points->num; // number of points
int dim = points->dim; // number of dimension
int nThread = num; // number of threads == number of data points
//=========================================
// ALLOCATE HOST MEMORY + DATA PREPARATION
//=========================================
work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) );
// Only on the first iteration
if(iter == 0)
{
allocHostMem(num, dim);
}
// build center-index table
int count = 0;
for( int i=0; i<num; i++)
{
if( is_center[i] )
{
center_table[i] = count++;
}
}
// Extract 'coord'
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
for(int i=0; i<dim; i++)
{
for(int j=0; j<num; j++)
{
coord_h[ (num*i)+j ] = points->p[j].coord[i];
}
}
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// ALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( cudaMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) );
// Only on the first iteration
if( iter == 0 )
{
allocDevMem(num, dim);
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*alloc_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// CPU-TO-GPU MEMORY COPY
//=======================================
// Only if first iteration OR coord has changed
if(isCoordChanged || iter == 0)
{
CUDA_SAFE_CALL( cudaMemcpy(coord_d, coord_h, num * dim * sizeof(float), cudaMemcpyHostToDevice) );
}
CUDA_SAFE_CALL( cudaMemcpy(center_table_d, center_table, num * sizeof(int), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(p, points->p, num * sizeof(Point), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemset((void*) switch_membership_d, 0, num * sizeof(bool)) );
CUDA_SAFE_CALL( cudaMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*cpu_to_gpu_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// KERNEL: CALCULATE COST
//=======================================
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK);
int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS);
int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
kernel_compute_cost<<<grid_size, THREADS_PER_BLOCK>>>(
num, // in: # of data
dim, // in: dimension of point coordinates
x, // in: point to open a center at
p, // in: data point array
K, // in: number of centers
stride, // in: size of each work_mem segment
coord_d, // in: array of point coordinates
work_mem_d, // out: cost and lower field array
center_table_d, // in: center index table
switch_membership_d // out: changes in membership
);
cudaThreadSynchronize();
// error check
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*kernel_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// GPU-TO-CPU MEMORY COPY
//=======================================
CUDA_SAFE_CALL( cudaMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), cudaMemcpyDeviceToHost) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*gpu_to_cpu_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// CPU (SERIAL) WORK
//=======================================
int number_of_centers_to_close = 0;
float gl_cost_of_opening_x = z;
float *gl_lower = &work_mem_h[stride * nThread];
// compute the number of centers to close if we are to open i
for(int i=0; i < num; i++)
{
if( is_center[i] )
{
float low = z;
for( int j = 0; j < num; j++ )
{
low += work_mem_h[ j*stride + center_table[i] ];
}
gl_lower[center_table[i]] = low;
if ( low > 0 )
{
++number_of_centers_to_close;
work_mem_h[i*stride+K] -= low;
}
}
gl_cost_of_opening_x += work_mem_h[i*stride+K];
}
//if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing
if ( gl_cost_of_opening_x < 0 )
{
for(int i = 0; i < num; i++)
{
bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
if ( switch_membership[i] || close_center )
{
points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight;
points->p[i].assign = x;
}
}
for(int i = 0; i < num; i++)
{
if( is_center[i] && gl_lower[center_table[i]] > 0 )
{
is_center[i] = false;
}
}
if( x >= 0 && x < num)
{
is_center[x] = true;
}
*numcenters = *numcenters + 1 - number_of_centers_to_close;
}
else
{
gl_cost_of_opening_x = 0;
}
//=======================================
// DEALLOCATE HOST MEMORY
//=======================================
free(work_mem_h);
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*serial_t += (double) tmp_t;
cudaEventRecord(start,0);
#endif
//=======================================
// DEALLOCATE GPU MEMORY
//=======================================
CUDA_SAFE_CALL( cudaFree(work_mem_d) );
#ifdef CUDATIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_t, start, stop);
*free_t += (double) tmp_t;
#endif
iter++;
return -gl_cost_of_opening_x;
}
|
e31ea3af84ea2dc12a9ecad5778e6b73c9f64407.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
////for debug purposes
//#define PRINT_RESIDUALS_SPARSE
//#define PRINT_RESIDUALS_DENSE
//#define ENABLE_EARLY_OUT
#include "GlobalDefines.h"
#include "SolverBundlingParameters.h"
#include "SolverBundlingState.h"
#include "SolverBundlingUtil.h"
#include "SolverBundlingEquations.h"
#include "SolverBundlingEquationsLie.h"
#include "SolverBundlingDenseUtil.h"
#include "../shared/CUDATimer.h"
#define THREADS_PER_BLOCK_DENSE_DEPTH 128
#define THREADS_PER_BLOCK_DENSE_DEPTH_FLIP 64
#define THREADS_PER_BLOCK_DENSE_OVERLAP 512
// For comparisons. When enabled do not prune images or correspondences based on lack of depth correspondences
#define ALWAYS_USE_DENSE_CORRESPONDENCES 1
#define TIME_INDIVIDUAL_STAGES 0
/////////////////////////////////////////////////////////////////////////
// Dense Depth Term
/////////////////////////////////////////////////////////////////////////
template<bool usePairwise>
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
// image indices
unsigned int i, j; // project from j to i
if (usePairwise) {
i = blockIdx.x; j = blockIdx.y; // all pairwise
if (i >= j) return;
}
else {
i = blockIdx.x; j = i + 1; // frame-to-frame
}
if (input.d_validImages[i] == 0 || input.d_validImages[j] == 0) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees //TODO HERE ANGIE
//if (!computeAngleDiff(transform, 0.8f)) return; //~45 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //~30 degrees
// find correspondence
__shared__ int foundCorr[1]; foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src //TODO PARAMS
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
//printf("(%d,%d): %d\n", i, j, foundCorr[0]);
if (ALWAYS_USE_DENSE_CORRESPONDENCES || foundCorr[0] > 10) { //TODO PARAMS
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
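// Mirror the accumulated triangle of the dense JtJ across its diagonal so the full symmetric
// matrix is available to the subsequent solver stages.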
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float* d_JtJ)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; //invTransform_i * transform_j
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse();
float4x4 transform = invTransform_i * transform_j;
#endif
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
		int count = 0;
//TODO HERE ANGIE
#ifdef CUDACACHE_UCHAR_NORMALS
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#elif defined(CUDACACHE_FLOAT_NORMALS)
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#else
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#endif
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], 1.0f);
count++;
} // found correspondence
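		// Reduce the per-thread hit counts: warpReduce() combines counts within each warp, the
		// per-warp partials are summed through shared memory, and a single atomicAdd per block
		// updates this image pair's correspondence counter.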
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], count);
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
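// Turn the raw per-pair correspondence counts into weights: pairs with too few matches are
// zeroed out (unless ALWAYS_USE_DENSE_CORRESPONDENCES is set), the rest get 1 / min(ln(count), 9).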
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
//if (x < 3200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
if (x < 800 && (!ALWAYS_USE_DENSE_CORRESPONDENCES)) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 400) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS //TODO EVAL DEBUG
else {
state.d_denseCorrCounts[idx] = 1.0f / min(logf(x), 9.0f); // natural log //TODO PARAMS
}
//state.d_denseCorrCounts[idx] = 1.0f / clamp(logf(x), 2.0f, 9.0f); // natural log //TODO PARAMS
}
}
}
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
#if !ALWAYS_USE_DENSE_CORRESPONDENCES
if (imPairWeight == 0.0f) return;
#endif
imPairWeight = 1.0f; // TODO: undo
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform_i = state.d_xTransforms[i];
float4x4 transform_j = state.d_xTransforms[j];
float4x4 invTransform_i = state.d_xTransformInverses[i];
float4x4 invTransform_j = state.d_xTransformInverses[j];
float4x4 transform = invTransform_i * transform_j;
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
// point-to-plane term
matNxM<1, 6> depthJacBlockRow_i, depthJacBlockRow_j; depthJacBlockRow_i.setZero(); depthJacBlockRow_j.setZero();
float depthRes = 0.0f; float depthWeight = 0.0f;
// color term
matNxM<1, 6> colorJacBlockRow_i, colorJacBlockRow_j; colorJacBlockRow_i.setZero(); colorJacBlockRow_j.setZero();
float colorRes = 0.0f; float colorWeight = 0.0f;
// find correspondence
float3 camPosSrc; float3 camPosSrcToTgt; float3 camPosTgt; float3 normalTgt; float2 tgtScreenPos;
//TODO HERE ANGIE
#ifdef CUDACACHE_FLOAT_NORMALS
bool foundCorr = findDenseCorr(
#if HELPER_INDEX
i, j,
#endif
srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#elif defined(CUDACACHE_UCHAR_NORMALS)
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#else
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#endif
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
float3 diff = camPosTgt - camPosSrcToTgt;
depthRes = dot(diff, normalTgt);
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, 0.5f*((1.0f - length(diff) / parameters.denseDistThresh) + (1.0f - camPosTgt.z / parameters.denseDepthMax)));
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.0f)); //fr1_desk
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.5f)); //fr3_office, fr2_xyz_half // livingroom1
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 3.0f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 1.8f)); //fr3_office, fr1_desk_f20
depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 2.5f)); //fr2_xyz_half
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 3.5f), 1.8f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / parameters.denseDepthMax), 1.8f)); //TODO EVAL DEBUGGING
//float wtgt = (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//float wsrc = (pow(max(0.0f, 1.0f - camPosSrc.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * wtgt * wsrc;
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
#else
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, normalTgt);
#endif
}
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx
, state.d_sumResidual, state.d_corrCount);
//addToLocalSystemBrute(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const float2 intensityDerivTgt = bilinearInterpolationFloat2(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDerivsDownsampled, input.denseDepthWidth, input.denseDepthHeight);
const float intensityTgt = bilinearInterpolationFloat(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDownsampled, input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityDerivTgt.x != MINF && abs(colorRes) < parameters.denseColorThresh && length(intensityDerivTgt) > parameters.denseColorGradientMin);
if (foundCorrColor) {
const float2 focalLength = make_float2(input.intrinsics.x, input.intrinsics.y);
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#else
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#endif
colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / (1.15f*parameters.denseColorThresh));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / parameters.denseColorThresh) * max(0.0f, (1.0f - camPosTgt.z / 1.0f));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 0.5f*(1.0f - abs(colorRes) / parameters.denseColorThresh) + 0.5f*max(0.0f, (1.0f - camPosTgt.z / 1.0f)));
}
}
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx
, state.d_sumResidualColor, state.d_corrCountColor);
//addToLocalSystemBrute(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
bool BuildDenseSystem(const SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
#ifdef PRINT_RESIDUALS_DENSE
cutilSafeCall(hipMemset(state.d_corrCount, 0, sizeof(int)));
cutilSafeCall(hipMemset(state.d_sumResidual, 0, sizeof(float)));
cutilSafeCall(hipMemset(state.d_corrCountColor, 0, sizeof(int)));
cutilSafeCall(hipMemset(state.d_sumResidualColor, 0, sizeof(float)));
#endif
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
cutilSafeCall(hipMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
cutilSafeCall(hipMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
cutilSafeCall(hipMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
cutilSafeCall(hipMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
dim3 gridImImOverlap;
if (parameters.useDenseDepthAllPairwise) gridImImOverlap = dim3(N, N, 1); // pairwise
else gridImImOverlap = dim3(N - 1, 1, 1); // for frame-to-frame
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - find image corr");
if (parameters.useDenseDepthAllPairwise) FindImageImageCorr_Kernel<true> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
else FindImageImageCorr_Kernel<false> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
int numOverlapImagePairs;
cutilSafeCall(hipMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages, sizeof(int), hipMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth*input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) / THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
//if (N > 11) printf("num overlap image pairs = %d\n", numOverlapImagePairs); //debugging only
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - compute im-im weights");
FindDenseCorrespondences_Kernel << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging //remember the delete!
//float* denseCorrCounts = new float[numOverlapImagePairs];
//cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, hipMemcpyDeviceToHost));
//unsigned int totalCount = 0;
//for (unsigned int i = 0; i < numOverlapImagePairs; i++) { totalCount += (unsigned int)denseCorrCounts[i]; }
//printf("total count = %d\n", totalCount);
//uint2* imageIndices = new uint2[numOverlapImagePairs];
//cutilSafeCall(hipMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, hipMemcpyDeviceToHost));
//if (imageIndices) delete[] imageIndices;
////debugging
//debugging - compute some overlap stats
//if (true || input.numberOfImages > 11) {
// float4x4* transforms = new float4x4[input.numberOfImages];
// float* denseCorrCounts = new float[numOverlapImagePairs];
// uint2* imageIndices = new uint2[numOverlapImagePairs];
// cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(transforms, state.d_xTransforms, sizeof(float4x4)*input.numberOfImages, hipMemcpyDeviceToHost));
// FILE* fp = fopen("debug/overlaps.csv", "w");
// char buffer[128];
// for (int i = 0; i < numOverlapImagePairs; i++) {
// if (denseCorrCounts[i] > 0) {
// float3 d = transforms[imageIndices[i].x].getTranslation() - transforms[imageIndices[i].y].getTranslation();
// sprintf(buffer, "%d,%d,%d,%f\n", imageIndices[i].x, imageIndices[i].y, (int)denseCorrCounts[i], length(d));
// fwrite(buffer, sizeof(char), strlen(buffer), fp);
// }
// }
// fclose(fp);
// if (transforms) delete[] transforms;
// if (denseCorrCounts) delete[] denseCorrCounts;
// if (imageIndices) delete[] imageIndices;
// int a = 5;
//}
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(maxDenseImPairs, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//cutilSafeCall(hipMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*maxDenseImPairs, hipMemcpyDeviceToHost));
//totalCount = 0;
//for (unsigned int i = 0; i < maxDenseImPairs; i++) { if (denseCorrCounts[i] > 0.0f) totalCount++; }
//printf("total count = %d\n", totalCount);
//if (denseCorrCounts) delete[] denseCorrCounts;
////debugging
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - build jtj/jtr");
if (parameters.weightDenseDepth > 0.0f) {
if (parameters.weightDenseColor > 0.0f) BuildDenseSystem_Kernel<true, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
else BuildDenseSystem_Kernel<true, false> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
else {
BuildDenseSystem_Kernel<false, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//bool debugPrint = true;
//float* h_JtJ = NULL;
//float* h_Jtr = NULL;
//if (debugPrint) {
// h_JtJ = new float[sizeJtJ];
// h_Jtr = new float[sizeJtr];
// cutilSafeCall(hipMemcpy(h_JtJ, state.d_denseJtJ, sizeof(float) * sizeJtJ, hipMemcpyDeviceToHost));
// cutilSafeCall(hipMemcpy(h_Jtr, state.d_denseJtr, sizeof(float) * sizeJtr, hipMemcpyDeviceToHost));
// printf("JtJ:\n");
// //for (unsigned int i = 0; i < 6 * N; i++) {
// // for (unsigned int j = 0; j < 6 * N; j++)
// for (unsigned int i = 6 * 1; i < 6 * 2; i++) {
// for (unsigned int j = 6 * 1; j < 6 * 2; j++)
// printf(" %f,", h_JtJ[j * 6 * N + i]);
// printf("\n");
// }
// printf("Jtr:\n");
// for (unsigned int i = 0; i < 6 * N; i++) {
// printf(" %f,", h_Jtr[i]);
// }
// printf("\n");
//}
////debugging
#ifdef PRINT_RESIDUALS_DENSE
if (parameters.weightDenseDepth > 0) {
float sumResidual; int corrCount;
cutilSafeCall(hipMemcpy(&sumResidual, state.d_sumResidual, sizeof(float), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(&corrCount, state.d_corrCount, sizeof(int), hipMemcpyDeviceToHost));
printf("\tdense depth: weights * residual = %g * %g = %g\t[#corr = %d]\n", parameters.weightDenseDepth, sumResidual / parameters.weightDenseDepth, sumResidual, corrCount);
}
if (parameters.weightDenseColor > 0) {
float sumResidual; int corrCount;
cutilSafeCall(hipMemcpy(&sumResidual, state.d_sumResidualColor, sizeof(float), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(&corrCount, state.d_corrCountColor, sizeof(int), hipMemcpyDeviceToHost));
printf("\tdense color: weights * residual = %g * %g = %g\t[#corr = %d]\n", parameters.weightDenseColor, sumResidual / parameters.weightDenseColor, sumResidual, corrCount);
}
#endif
const unsigned int flipgrid = (sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << <flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return true;
}
//todo more efficient?? (there are multiple per image-image...)
//get high residuals
__global__ void collectHighResidualsDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters, unsigned int maxNumHighResiduals)
{
	const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.highResidualThresh) {
int idx = atomicAdd(state.d_countHighResidual, 1);
if (idx < maxNumHighResiduals) {
analysis.d_maxResidual[idx] = residual;
analysis.d_maxResidualIndex[idx] = corrIdx;
}
}
}
}
extern "C" void collectHighResiduals(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
cutilSafeCall(hipMemset(state.d_countHighResidual, 0, sizeof(int)));
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
unsigned int maxNumHighResiduals = (input.maxCorrPerImage*input.maxNumberOfImages + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
collectHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters, maxNumHighResiduals);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Max Residual
/////////////////////////////////////////////////////////////////////////
__global__ void EvalMaxResidualDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters)
{
__shared__ int maxResIndex[THREADS_PER_BLOCK];
__shared__ float maxRes[THREADS_PER_BLOCK];
	const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
maxResIndex[threadIdx.x] = 0;
maxRes[threadIdx.x] = 0.0f;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
maxRes[threadIdx.x] = residual;
maxResIndex[threadIdx.x] = corrIdx;
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
if (maxRes[first] < maxRes[second]) {
maxRes[first] = maxRes[second];
maxResIndex[first] = maxResIndex[second];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
//printf("d_maxResidual[%d] = %g (index %d)\n", blockIdx.x, maxRes[0], maxResIndex[0]);
analysis.d_maxResidual[blockIdx.x] = maxRes[0];
analysis.d_maxResidualIndex[blockIdx.x] = maxResIndex[0];
}
}
}
extern "C" void evalMaxResidual(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
EvalMaxResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N) {
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_sumResidual[0], residual);
}
}
extern "C" float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
float residual = 0.0f;
	const unsigned int N = input.numberOfCorrespondences;	// Number of correspondences
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
residual = state.getSumResidual();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return residual;
}
/////////////////////////////////////////////////////////////////////////
// Eval Linear Residual
/////////////////////////////////////////////////////////////////////////
//__global__ void SumLinearResDevice(SolverInput input, SolverState state, SolverParameters parameters)
//{
// const unsigned int N = input.numberOfImages; // Number of block variables
// const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
//
// float residual = 0.0f;
// if (x > 0 && x < N) {
// residual = dot(state.d_rRot[x], state.d_rRot[x]) + dot(state.d_rTrans[x], state.d_rTrans[x]);
// atomicAdd(state.d_sumLinResidual, residual);
// }
//}
//float EvalLinearRes(SolverInput& input, SolverState& state, SolverParameters& parameters)
//{
// float residual = 0.0f;
//
// const unsigned int N = input.numberOfImages; // Number of block variables
//
// // Do PCG step
// const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
//
// float init = 0.0f;
// cutilSafeCall(hipMemcpy(state.d_sumLinResidual, &init, sizeof(float), hipMemcpyHostToDevice));
//
// SumLinearResDevice << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//#ifdef _DEBUG
// cutilSafeCall(hipDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//
// cutilSafeCall(hipMemcpy(&residual, state.d_sumLinResidual, sizeof(float), hipMemcpyDeviceToHost));
// return residual;
//}
/////////////////////////////////////////////////////////////////////////
// Count High Residuals
/////////////////////////////////////////////////////////////////////////
__global__ void CountHighResidualsDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.verifyOptDistThresh)
atomicAdd(state.d_countHighResidual, 1);
}
}
extern "C" int countHighResiduals(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
cutilSafeCall(hipMemset(state.d_countHighResidual, 0, sizeof(int)));
CountHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
int count;
cutilSafeCall(hipMemcpy(&count, state.d_countHighResidual, sizeof(int), hipMemcpyDeviceToHost));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return count;
}
/////////////////////////////////////////////////////////////////////////
// Convergence Analysis
/////////////////////////////////////////////////////////////////////////
//uses same data store as max residual
__global__ void EvalGNConvergenceDevice(SolverInput input, SolverStateAnalysis analysis, SolverState state) //compute max of delta
{
__shared__ float maxVal[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
maxVal[threadIdx.x] = 0.0f;
if (x < N)
{
if (x == 0 || input.d_validImages[x] == 0)
maxVal[threadIdx.x] = 0.0f;
else {
float3 r3 = fmaxf(fabs(state.d_deltaRot[x]), fabs(state.d_deltaTrans[x]));
float r = fmaxf(r3.x, fmaxf(r3.y, r3.z));
maxVal[threadIdx.x] = r;
}
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
maxVal[first] = fmaxf(maxVal[first], maxVal[second]);
}
__syncthreads();
}
if (threadIdx.x == 0) {
analysis.d_maxResidual[blockIdx.x] = maxVal[0];
}
}
}
float EvalGNConvergence(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfImages;
const unsigned int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
EvalGNConvergenceDevice << < blocksPerGrid, THREADS_PER_BLOCK >> >(input, analysis, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//copy to host and compute max
cutilSafeCall(hipMemcpy(analysis.h_maxResidual, analysis.d_maxResidual, sizeof(float) * blocksPerGrid, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(analysis.h_maxResidualIndex, analysis.d_maxResidualIndex, sizeof(int) * blocksPerGrid, hipMemcpyDeviceToHost));
float maxVal = 0.0f;
for (unsigned int i = 0; i < blocksPerGrid; i++) {
if (maxVal < analysis.h_maxResidual[i]) maxVal = analysis.h_maxResidual[i];
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return maxVal;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
template<bool useDense>
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
float3 resRot, resTrans;
evalMinusJTFDevice<useDense>(x, input, state, parameters, resRot, resTrans); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const float3 pRot = state.d_precondionerRot[x] * resRot; // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const float3 pTrans = state.d_precondionerTrans[x] * resTrans; // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
		d = dot(resRot, pRot) + dot(resTrans, pTrans);		// x-th term of the numerator for computing alpha and of the denominator for computing beta
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Initialization");
//!!!DEBUGGING //remember to uncomment the delete...
//float3* rRot = new float3[input.numberOfImages]; // -jtf
//float3* rTrans = new float3[input.numberOfImages];
//!!!DEBUGGING
cutilSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float)));
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (parameters.useDense) PCGInit_Kernel1<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
else PCGInit_Kernel1<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//cutilSafeCall(hipMemcpy(rRot, state.d_rRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(rTrans, state.d_rTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr rRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr rTrans %d\n", i); getchar(); } }
//cutilSafeCall(hipMemcpy(rRot, state.d_pRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(rTrans, state.d_pTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr pRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr pTrans %d\n", i); getchar(); } }
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
//float scanAlpha;
//cutilSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
//if (rRot) delete[] rRot;
//if (rTrans) delete[] rTrans;
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
//inefficient
__global__ void PCGStep_Kernel_Dense_Brute(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseBruteDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans); // A x p_k => J^T x J x p_k
state.d_Ap_XRot[x] += rot;
state.d_Ap_XTrans[x] += trans;
}
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans, threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
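// PCGStep_Kernel0: first half of the sparse J^T J p product - evaluates J p for every
// correspondence and caches the result in d_Jp so that PCGStep_Kernel1a can apply J^T
// without re-evaluating the Jacobian.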
__global__ void PCGStep_Kernel0(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
const float3 tmp = applyJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k
state.d_Jp[x] = tmp; // store for next kernel call
}
}
__global__ void PCGStep_Kernel1a(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTDevice(x, input, state, parameters, rot, trans, threadIdx.x, lane); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
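// PCGStep_Kernel1b: reduces the per-variable dot products p·(A p) into d_scanAlpha[0]
// (the denominator of alpha) using a warp reduction plus one atomicAdd per warp.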
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
d = dot(state.d_pRot[x], state.d_Ap_XRot[x]) + dot(state.d_pTrans[x], state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
state.d_deltaRot[x] = state.d_deltaRot[x] + alpha*state.d_pRot[x]; // do a descent step
state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha*state.d_pTrans[x]; // do a descent step
float3 rRot = state.d_rRot[x] - alpha*state.d_Ap_XRot[x]; // update residuum
state.d_rRot[x] = rRot; // store for next kernel call
float3 rTrans = state.d_rTrans[x] - alpha*state.d_Ap_XTrans[x]; // update residuum
state.d_rTrans[x] = rTrans; // store for next kernel call
float3 zRot = state.d_precondionerRot[x] * rRot; // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
float3 zTrans = state.d_precondionerTrans[x] * rTrans; // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
b = dot(zRot, rRot) + dot(zTrans, rTrans); // compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N)
{
const float rDotzNew = state.d_scanAlpha[1]; // get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
state.d_pRot[x] = state.d_zRot[x] + beta*state.d_pRot[x]; // update descent direction
state.d_pTrans[x] = state.d_zTrans[x] + beta*state.d_pTrans[x]; // update descent direction
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
if (lastIteration)
{
//if (input.d_validImages[x]) { //not really necessary
#ifdef USE_LIE_SPACE //TODO just keep that matrix transforms around
float3 rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x], rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
#else
state.d_xRot[x] = state.d_xRot[x] + state.d_deltaRot[x];
state.d_xTrans[x] = state.d_xTrans[x] + state.d_deltaTrans[x];
#endif
//}
}
}
}
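// One PCG iteration: clear the scan buffers, accumulate A p via the sparse (Kernel0 + Kernel1a)
// and/or dense (Kernel_Dense) paths, reduce p·Ap (Kernel1b), update delta/r/z and r·z (Kernel2),
// then update the search direction (Kernel3). With ENABLE_EARLY_OUT the linear solve is also
// terminated once |p·Ap| drops below 5e-7.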
template<bool useSparse, bool useDense>
bool PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, bool lastIteration, CUDATimer *timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Full PCGIteration");
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Memset scanalpha");
cutilSafeCall(hipMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
// sparse part
if (useSparse) {
const unsigned int Ncorr = input.numberOfCorrespondences;
const int blocksPerGridCorr = (Ncorr + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Sparse J)");
PCGStep_Kernel0 << <blocksPerGridCorr, THREADS_PER_BLOCK >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Sparse Jt)");
PCGStep_Kernel1a << < N, THREADS_PER_BLOCK_JT >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
if (useDense) {
//if (timer) timer->startEvent("apply JTJ dense");
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Dense JtJ)");
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
//PCGStep_Kernel_Dense_Brute << < N, 1 >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//if (timer) timer->endEvent();
}
//!!!debugging
//float3* Ap_Rot = new float3[input.numberOfImages];
//float3* Ap_Trans = new float3[input.numberOfImages];
//cutilSafeCall(hipMemcpy(Ap_Rot, state.d_Ap_XRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(Ap_Trans, state.d_Ap_XTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Rot[i].x)) { printf("NaN at Ap rot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Trans[i].x)) { printf("NaN at Ap trans %d\n", i); getchar(); } }
//if (Ap_Rot) delete[] Ap_Rot;
//if (Ap_Trans) delete[] Ap_Trans;
//!!!debugging
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Finish)");
PCGStep_Kernel1b << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
#ifdef ENABLE_EARLY_OUT //for convergence
float scanAlpha; cutilSafeCall(hipMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), hipMemcpyDeviceToHost));
//if (fabs(scanAlpha) < 0.00005f) lastIteration = true; //todo check this part
//if (fabs(scanAlpha) < 1e-6) lastIteration = true; //todo check this part
if (fabs(scanAlpha) < 5e-7) { lastIteration = true; } //todo check this part
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep3");
if (lastIteration) {
PCGStep_Kernel3<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
else {
PCGStep_Kernel3<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return lastIteration;
}
#ifdef USE_LIE_SPACE //TODO
////////////////////////////////////////////////////////////////////
// matrix <-> pose
////////////////////////////////////////////////////////////////////
__global__ void convertLiePosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].getInverse();
}
}
extern "C"
void convertLiePosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs, CUDATimer *timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Lie -> Matrix");
convertLiePosesToMatricesCU_Kernel << <(numTransforms + 8 - 1) / 8, 8 >> >(d_rot, d_trans, numTransforms, d_transforms, d_transformInvs);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
#endif
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
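// solveBundlingStub: outer Gauss-Newton loop. Each non-linear iteration loads the per-iteration
// sparse/dense weights, optionally (re)builds the dense JtJ/Jtr system, runs Initialization plus
// up to nLinIterations PCG steps for the linear solve, and with ENABLE_EARLY_OUT stops early once
// the maximum pose update reported by EvalGNConvergence falls below 0.005.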
extern "C" void solveBundlingStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, float* convergenceAnalysis, CUDATimer *timer, bool dumpInputOutput)
{
if (convergenceAnalysis) {
float initialResidual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[0] = initialResidual; // initial residual
}
float initResidual, endResidual;
if (input.numberOfImages > 11) {
initResidual = EvalResidual(input, state, parameters, timer);
}
//!!!DEBUGGING
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
if (input.numberOfCorrespondences == 0) { printf("ERROR: %d correspondences\n", input.numberOfCorrespondences); getchar(); }
float initialResidual = EvalResidual(input, state, parameters, timer);
printf("initial sparse = %g*%g = %g\n", parameters.weightSparse, initialResidual / parameters.weightSparse, initialResidual);
}
#endif
//float3* xRot = new float3[input.numberOfImages]; //remember the delete!
//float3* xTrans = new float3[input.numberOfImages];
//timer = new CUDATimer();
//static unsigned int totalLinIters = 0, numLin = 0, totalNonLinIters = 0, numNonLin = 0;
//!!!DEBUGGING
if(timer) timer->startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
if (timer) timer->startEvent("Nonlinear Iteration");
if (timer) timer->startEvent("Nonlinear Setup");
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses, timer);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
Initialization(input, state, parameters, timer);
if (timer) timer->endEvent();
if (timer) timer->startEvent("Linear Solve");
if (parameters.weightSparse > 0.0f) {
if (parameters.useDense) {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
break;
}
} else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, false>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
//totalLinIters += (linIter+1); numLin++;
break;
}
}
} else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<false, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
break;
}
}
if (timer) timer->endEvent();
if (timer) timer->startEvent("Nonlinear Finish");
//!!!debugging
//cutilSafeCall(hipMemcpy(xRot, state.d_xRot, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(xTrans, state.d_xTrans, sizeof(float3)*input.numberOfImages, hipMemcpyDeviceToHost));
//!!!debugging
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
float residual = EvalResidual(input, state, parameters, timer);
printf("[niter %d] weight * sparse = %g*%g = %g\t[#corr = %d]\n", nIter, parameters.weightSparse, residual / parameters.weightSparse, residual, input.numberOfCorrespondences);
}
#endif
if (convergenceAnalysis) {
float residual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[nIter + 1] = residual;
}
if (timer) timer->endEvent();
if (timer) timer->endEvent();
#ifdef ENABLE_EARLY_OUT //convergence
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.01f) { //!!! TODO CHECK HOW THESE GENERALIZE
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.005f) { //0.001?
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.001f) {
//if (!parameters.useDense) { totalNonLinIters += (nIter+1); numNonLin++; }
break;
}
//else if (!parameters.useDense && nIter == parameters.nNonLinearIterations - 1) { totalNonLinIters += (nIter+1); numNonLin++; }
#endif
}
#ifdef PRINT_RESIDUALS_DENSE
// So we can get a final result.
unsigned int nIter = parameters.nNonLinearIterations - 1;
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses, timer);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
#endif
if (timer) timer->endEvent();
if (input.numberOfImages > 11) {
endResidual = EvalResidual(input, state, parameters, timer);
if (initResidual > 200 * endResidual) {
printf("Large residual decrease in global solve!: %g -> %g; %g\n", initResidual, endResidual, initResidual/endResidual);
}
}
//!!!debugging
//if (xRot) delete[] xRot;
//if (xTrans) delete[] xTrans;
//if (timer) { timer->evaluate(true, false); delete timer; }
//if (!parameters.useDense) { printf("mean #pcg its = %f\tmean #gn its = %f\n", (float)totalLinIters / (float)numLin, (float)totalNonLinIters / (float)numNonLin); } //just stats for global solve
//!!!debugging
}
////////////////////////////////////////////////////////////////////
// build variables to correspondences lookup
////////////////////////////////////////////////////////////////////
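// Each valid correspondence registers itself in the per-image rows of d_variablesToCorrespondences
// via atomic counters in d_numEntriesPerRow; correspondences that would overflow
// maxNumCorrespondencesPerImage are invalidated so that J and J^T stay consistent.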
__global__ void BuildVariablesToCorrespondencesTableDevice(EntryJ* d_correspondences, unsigned int numberOfCorrespondences,
unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow)
{
const unsigned int N = numberOfCorrespondences; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
EntryJ& corr = d_correspondences[x];
if (corr.isValid()) {
int offset0 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_i], 1); // may overflow - need to check when read
int offset1 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_j], 1); // may overflow - need to check when read
if (offset0 < maxNumCorrespondencesPerImage && offset1 < maxNumCorrespondencesPerImage) {
d_variablesToCorrespondences[corr.imgIdx_i * maxNumCorrespondencesPerImage + offset0] = x;
d_variablesToCorrespondences[corr.imgIdx_j * maxNumCorrespondencesPerImage + offset1] = x;
}
else { //invalidate
printf("EXCEEDED MAX NUM CORR PER IMAGE IN SOLVER, INVALIDATING %d(%d,%d) [%d,%d | %d]\n",
x, corr.imgIdx_i, corr.imgIdx_j, offset0, offset1, maxNumCorrespondencesPerImage); //debugging
corr.setInvalid(); //make sure j corresponds to jt
}
}
}
}
extern "C" void buildVariablesToCorrespondencesTableCUDA(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow, CUDATimer* timer)
{
const unsigned int N = numberOfCorrespondences;
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
BuildVariablesToCorrespondencesTableDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_correspondences, numberOfCorrespondences, maxNumCorrespondencesPerImage, d_variablesToCorrespondences, d_numEntriesPerRow);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
| e31ea3af84ea2dc12a9ecad5778e6b73c9f64407.cu | #include <iostream>
////for debug purposes
//#define PRINT_RESIDUALS_SPARSE
//#define PRINT_RESIDUALS_DENSE
//#define ENABLE_EARLY_OUT
#include "GlobalDefines.h"
#include "SolverBundlingParameters.h"
#include "SolverBundlingState.h"
#include "SolverBundlingUtil.h"
#include "SolverBundlingEquations.h"
#include "SolverBundlingEquationsLie.h"
#include "SolverBundlingDenseUtil.h"
#include "../shared/CUDATimer.h"
#define THREADS_PER_BLOCK_DENSE_DEPTH 128
#define THREADS_PER_BLOCK_DENSE_DEPTH_FLIP 64
#define THREADS_PER_BLOCK_DENSE_OVERLAP 512
// For comparisons. When enabled do not prune images or correspondences based on lack of depth correspondences
#define ALWAYS_USE_DENSE_CORRESPONDENCES 1
#define TIME_INDIVIDUAL_STAGES 0
/////////////////////////////////////////////////////////////////////////
// Dense Depth Term
/////////////////////////////////////////////////////////////////////////
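// FindImageImageCorr_Kernel: one block per candidate image pair (all pairwise or frame-to-frame).
// Threads test a subsampled pixel grid for depth overlap under the current relative transform;
// pairs that pass the rotation-angle check (and, unless ALWAYS_USE_DENSE_CORRESPONDENCES is set,
// collect enough correspondences) are appended to d_denseOverlappingImages.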
template<bool usePairwise>
__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
// image indices
unsigned int i, j; // project from j to i
if (usePairwise) {
i = blockIdx.x; j = blockIdx.y; // all pairwise
if (i >= j) return;
}
else {
i = blockIdx.x; j = i + 1; // frame-to-frame
}
if (input.d_validImages[i] == 0 || input.d_validImages[j] == 0) return;
const unsigned int tidx = threadIdx.x;
const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor;
const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor;
const unsigned int idx = y * input.denseDepthWidth + x;
if (idx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j];
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
//if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees //TODO HERE ANGIE
//if (!computeAngleDiff(transform, 0.8f)) return; //~45 degrees
if (!computeAngleDiff(transform, 0.52f)) return; //~30 degrees
// find correspondence
__shared__ int foundCorr[1]; foundCorr[0] = 0;
__syncthreads();
if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src //TODO PARAMS
atomicAdd(foundCorr, 1);
} // found correspondence
__syncthreads();
if (tidx == 0) {
//printf("(%d,%d): %d\n", i, j, foundCorr[0]);
if (ALWAYS_USE_DENSE_CORRESPONDENCES || foundCorr[0] > 10) { //TODO PARAMS
int addr = atomicAdd(state.d_numDenseOverlappingImages, 1);
state.d_denseOverlappingImages[addr] = make_uint2(i, j);
}
}
} // valid image pixel
}
__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float* d_JtJ)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < total) {
const unsigned int x = idx % dim;
const unsigned int y = idx / dim;
if (x > y) {
d_JtJ[y * dim + x] = d_JtJ[x * dim + y];
}
}
}
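// FindDenseCorrespondences_Kernel: for every overlapping image pair, counts the valid dense
// correspondences over all cache pixels using a warp reduction followed by a shared-memory tree
// reduction, and accumulates the block totals into d_denseCorrCounts[imPairIdx].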
__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
const unsigned int tidx = threadIdx.x;
const unsigned int gidx = tidx * gridDim.y + blockIdx.y;
if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; //invTransform_i * transform_j
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse();
float4x4 transform = invTransform_i * transform_j;
#endif
// find correspondence
const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE;
__shared__ int s_count[numWarps];
s_count[0] = 0;
int count = 0;
//TODO HERE ANGIE
#ifdef CUDACACHE_UCHAR_NORMALS
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#elif defined(CUDACACHE_FLOAT_NORMALS)
if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#else
// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src
//#endif
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], 1.0f);
count++;
} // found correspondence
count = warpReduce(count);
__syncthreads();
if (tidx % WARP_SIZE == 0) {
s_count[tidx / WARP_SIZE] = count;
//atomicAdd(&state.d_denseCorrCounts[imPairIdx], count);
}
__syncthreads();
for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) {
if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride];
__syncthreads();
}
if (tidx == 0) {
atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]);
}
} // valid image pixel
}
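// WeightDenseCorrespondences_Kernel: converts the raw correspondence counts into per-pair weights
// 1 / min(ln(count), 9); pairs with fewer than 800 correspondences are discarded unless
// ALWAYS_USE_DENSE_CORRESPONDENCES is set.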
__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
// apply ln to weights
float x = state.d_denseCorrCounts[idx];
if (x > 0) {
//if (x < 3200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
if (x < 800 && (!ALWAYS_USE_DENSE_CORRESPONDENCES)) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 400) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS
//if (x < 200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS //TODO EVAL DEBUG
else {
state.d_denseCorrCounts[idx] = 1.0f / min(logf(x), 9.0f); // natural log //TODO PARAMS
}
//state.d_denseCorrCounts[idx] = 1.0f / clamp(logf(x), 2.0f, 9.0f); // natural log //TODO PARAMS
}
}
}
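// BuildDenseSystem_Kernel: per overlapping image pair and per source pixel, finds the dense
// correspondence, evaluates the point-to-plane depth residual and/or the photometric (intensity)
// residual together with their 6-DoF Jacobian block rows, and accumulates the weighted
// contributions into the dense JtJ / Jtr via addToLocalSystem.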
template<bool useDepth, bool useColor>
__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters)
{
const int imPairIdx = blockIdx.x;
uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx];
unsigned int i = imageIndices.x; unsigned int j = imageIndices.y;
float imPairWeight = state.d_denseCorrCounts[imPairIdx];
#if !ALWAYS_USE_DENSE_CORRESPONDENCES
if (imPairWeight == 0.0f) return;
#endif
imPairWeight = 1.0f; // TODO: undo
const unsigned int idx = threadIdx.x;
const unsigned int srcIdx = idx * gridDim.y + blockIdx.y;
if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) {
#ifdef USE_LIE_SPACE
float4x4 transform_i = state.d_xTransforms[i];
float4x4 transform_j = state.d_xTransforms[j];
float4x4 invTransform_i = state.d_xTransformInverses[i];
float4x4 invTransform_j = state.d_xTransformInverses[j];
float4x4 transform = invTransform_i * transform_j;
#else
float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]);
float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]);
float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE?
float4x4 transform = invTransform_i * transform_j;
#endif
// point-to-plane term
matNxM<1, 6> depthJacBlockRow_i, depthJacBlockRow_j; depthJacBlockRow_i.setZero(); depthJacBlockRow_j.setZero();
float depthRes = 0.0f; float depthWeight = 0.0f;
// color term
matNxM<1, 6> colorJacBlockRow_i, colorJacBlockRow_j; colorJacBlockRow_i.setZero(); colorJacBlockRow_j.setZero();
float colorRes = 0.0f; float colorWeight = 0.0f;
// find correspondence
float3 camPosSrc; float3 camPosSrcToTgt; float3 camPosTgt; float3 normalTgt; float2 tgtScreenPos;
//TODO HERE ANGIE
#ifdef CUDACACHE_FLOAT_NORMALS
bool foundCorr = findDenseCorr(
#if HELPER_INDEX
i, j,
#endif
srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#elif defined(CUDACACHE_UCHAR_NORMALS)
bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
#endif
//#ifdef CUDACACHE_UCHAR_NORMALS
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#else
// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight,
// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics,
// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled,
// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled,
// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src
//#endif
if (useDepth) {
if (foundCorr) {
// point-to-plane residual
float3 diff = camPosTgt - camPosSrcToTgt;
depthRes = dot(diff, normalTgt);
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, 0.5f*((1.0f - length(diff) / parameters.denseDistThresh) + (1.0f - camPosTgt.z / parameters.denseDepthMax)));
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.0f)); //fr1_desk
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.5f)); //fr3_office, fr2_xyz_half // livingroom1
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 3.0f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 1.8f)); //fr3_office, fr1_desk_f20
depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 2.5f)); //fr2_xyz_half
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 3.5f), 1.8f)); //fr3_nstn
//depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / parameters.denseDepthMax), 1.8f)); //TODO EVAL DEBUGGING
//float wtgt = (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f));
//float wsrc = (pow(max(0.0f, 1.0f - camPosSrc.z / 2.5f), 1.8f));
//depthWeight = parameters.weightDenseDepth * imPairWeight * wtgt * wsrc;
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt);
#else
if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, normalTgt);
if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, normalTgt);
#endif
}
addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx
, state.d_sumResidual, state.d_corrCount);
//addToLocalSystemBrute(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx);
}
if (useColor) {
bool foundCorrColor = false;
if (foundCorr) {
const float2 intensityDerivTgt = bilinearInterpolationFloat2(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDerivsDownsampled, input.denseDepthWidth, input.denseDepthHeight);
const float intensityTgt = bilinearInterpolationFloat(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDownsampled, input.denseDepthWidth, input.denseDepthHeight);
colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx];
foundCorrColor = (intensityDerivTgt.x != MINF && abs(colorRes) < parameters.denseColorThresh && length(intensityDerivTgt) > parameters.denseColorGradientMin);
if (foundCorrColor) {
const float2 focalLength = make_float2(input.intrinsics.x, input.intrinsics.y);
#ifdef USE_LIE_SPACE
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#else
if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, camPosSrcToTgt, intensityDerivTgt);
#endif
colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / (1.15f*parameters.denseColorThresh));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / parameters.denseColorThresh) * max(0.0f, (1.0f - camPosTgt.z / 1.0f));
//colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 0.5f*(1.0f - abs(colorRes) / parameters.denseColorThresh) + 0.5f*max(0.0f, (1.0f - camPosTgt.z / 1.0f)));
}
}
addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx
, state.d_sumResidualColor, state.d_corrCountColor);
//addToLocalSystemBrute(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6,
// colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx);
}
} // valid image pixel
}
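// BuildDenseSystem (host): clears the dense buffers, finds the overlapping image pairs, counts and
// weights their dense correspondences, accumulates JtJ / Jtr, and mirrors the accumulated triangle
// of JtJ to the other half with FlipJtJ_Kernel. Returns false if no overlapping pair was found so
// the caller can skip the dense term in this iteration.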
bool BuildDenseSystem(const SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int sizeJtr = 6 * N;
const int sizeJtJ = sizeJtr * sizeJtr;
#ifdef PRINT_RESIDUALS_DENSE
cutilSafeCall(cudaMemset(state.d_corrCount, 0, sizeof(int)));
cutilSafeCall(cudaMemset(state.d_sumResidual, 0, sizeof(float)));
cutilSafeCall(cudaMemset(state.d_corrCountColor, 0, sizeof(int)));
cutilSafeCall(cudaMemset(state.d_sumResidualColor, 0, sizeof(float)));
#endif
const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2;
cutilSafeCall(cudaMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs));
cutilSafeCall(cudaMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ));
cutilSafeCall(cudaMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr));
cutilSafeCall(cudaMemset(state.d_numDenseOverlappingImages, 0, sizeof(int)));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
dim3 gridImImOverlap;
if (parameters.useDenseDepthAllPairwise) gridImImOverlap = dim3(N, N, 1); // pairwise
else gridImImOverlap = dim3(N - 1, 1, 1); // for frame-to-frame
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - find image corr");
if (parameters.useDenseDepthAllPairwise) FindImageImageCorr_Kernel<true> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
else FindImageImageCorr_Kernel<false> << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
int numOverlapImagePairs;
cutilSafeCall(cudaMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages, sizeof(int), cudaMemcpyDeviceToHost));
if (numOverlapImagePairs == 0) {
printf("warning: no overlapping images for dense solve\n");
return false;
}
const int reductionGlobal = (input.denseDepthWidth*input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) / THREADS_PER_BLOCK_DENSE_DEPTH;
dim3 grid(numOverlapImagePairs, reductionGlobal);
//if (N > 11) printf("num overlap image pairs = %d\n", numOverlapImagePairs); //debugging only
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - compute im-im weights");
FindDenseCorrespondences_Kernel << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging //remember the delete!
//float* denseCorrCounts = new float[numOverlapImagePairs];
//cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
//unsigned int totalCount = 0;
//for (unsigned int i = 0; i < numOverlapImagePairs; i++) { totalCount += (unsigned int)denseCorrCounts[i]; }
//printf("total count = %d\n", totalCount);
//uint2* imageIndices = new uint2[numOverlapImagePairs];
//cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
//if (imageIndices) delete[] imageIndices;
////debugging
//debugging - compute some overlap stats
//if (true || input.numberOfImages > 11) {
// float4x4* transforms = new float4x4[input.numberOfImages];
// float* denseCorrCounts = new float[numOverlapImagePairs];
// uint2* imageIndices = new uint2[numOverlapImagePairs];
// cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(transforms, state.d_xTransforms, sizeof(float4x4)*input.numberOfImages, cudaMemcpyDeviceToHost));
// FILE* fp = fopen("debug/overlaps.csv", "w");
// char buffer[128];
// for (int i = 0; i < numOverlapImagePairs; i++) {
// if (denseCorrCounts[i] > 0) {
// float3 d = transforms[imageIndices[i].x].getTranslation() - transforms[imageIndices[i].y].getTranslation();
// sprintf(buffer, "%d,%d,%d,%f\n", imageIndices[i].x, imageIndices[i].y, (int)denseCorrCounts[i], length(d));
// fwrite(buffer, sizeof(char), strlen(buffer), fp);
// }
// }
// fclose(fp);
// if (transforms) delete[] transforms;
// if (denseCorrCounts) delete[] denseCorrCounts;
// if (imageIndices) delete[] imageIndices;
// int a = 5;
//}
int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(maxDenseImPairs, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*maxDenseImPairs, cudaMemcpyDeviceToHost));
//totalCount = 0;
//for (unsigned int i = 0; i < maxDenseImPairs; i++) { if (denseCorrCounts[i] > 0.0f) totalCount++; }
//printf("total count = %d\n", totalCount);
//if (denseCorrCounts) delete[] denseCorrCounts;
////debugging
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("BuildDenseDepthSystem - build jtj/jtr");
if (parameters.weightDenseDepth > 0.0f) {
if (parameters.weightDenseColor > 0.0f) BuildDenseSystem_Kernel<true, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
else BuildDenseSystem_Kernel<true, false> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
else {
BuildDenseSystem_Kernel<false, true> << <grid, THREADS_PER_BLOCK_DENSE_DEPTH >> >(input, state, parameters);
}
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
////debugging
//bool debugPrint = true;
//float* h_JtJ = NULL;
//float* h_Jtr = NULL;
//if (debugPrint) {
// h_JtJ = new float[sizeJtJ];
// h_Jtr = new float[sizeJtr];
// cutilSafeCall(cudaMemcpy(h_JtJ, state.d_denseJtJ, sizeof(float) * sizeJtJ, cudaMemcpyDeviceToHost));
// cutilSafeCall(cudaMemcpy(h_Jtr, state.d_denseJtr, sizeof(float) * sizeJtr, cudaMemcpyDeviceToHost));
// printf("JtJ:\n");
// //for (unsigned int i = 0; i < 6 * N; i++) {
// // for (unsigned int j = 0; j < 6 * N; j++)
// for (unsigned int i = 6 * 1; i < 6 * 2; i++) {
// for (unsigned int j = 6 * 1; j < 6 * 2; j++)
// printf(" %f,", h_JtJ[j * 6 * N + i]);
// printf("\n");
// }
// printf("Jtr:\n");
// for (unsigned int i = 0; i < 6 * N; i++) {
// printf(" %f,", h_Jtr[i]);
// }
// printf("\n");
//}
////debugging
#ifdef PRINT_RESIDUALS_DENSE
if (parameters.weightDenseDepth > 0) {
float sumResidual; int corrCount;
cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidual, sizeof(float), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCount, sizeof(int), cudaMemcpyDeviceToHost));
printf("\tdense depth: weights * residual = %g * %g = %g\t[#corr = %d]\n", parameters.weightDenseDepth, sumResidual / parameters.weightDenseDepth, sumResidual, corrCount);
}
if (parameters.weightDenseColor > 0) {
float sumResidual; int corrCount;
cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidualColor, sizeof(float), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCountColor, sizeof(int), cudaMemcpyDeviceToHost));
printf("\tdense color: weights * residual = %g * %g = %g\t[#corr = %d]\n", parameters.weightDenseColor, sumResidual / parameters.weightDenseColor, sumResidual, corrCount);
}
#endif
const unsigned int flipgrid = (sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP;
FlipJtJ_Kernel << <flipgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(sizeJtJ, sizeJtr, state.d_denseJtJ);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return true;
}
//todo more efficient?? (there are multiple per image-image...)
//get high residuals
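// collectHighResidualsDevice: every correspondence whose absolute residual exceeds
// parameters.highResidualThresh claims a slot via atomicAdd on d_countHighResidual and, while
// slots remain (< maxNumHighResiduals), stores its residual and index for later inspection.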
__global__ void collectHighResidualsDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters, unsigned int maxNumHighResiduals)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.highResidualThresh) {
int idx = atomicAdd(state.d_countHighResidual, 1);
if (idx < maxNumHighResiduals) {
analysis.d_maxResidual[idx] = residual;
analysis.d_maxResidualIndex[idx] = corrIdx;
}
}
}
}
extern "C" void collectHighResiduals(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int)));
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
unsigned int maxNumHighResiduals = (input.maxCorrPerImage*input.maxNumberOfImages + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
collectHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters, maxNumHighResiduals);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Max Residual
/////////////////////////////////////////////////////////////////////////
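// EvalMaxResidualDevice: block-wide tree reduction over the absolute sparse residuals; each block
// writes its maximum residual and the corresponding correspondence index to
// d_maxResidual / d_maxResidualIndex (the reduction over blocks happens outside this kernel).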
__global__ void EvalMaxResidualDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters)
{
__shared__ int maxResIndex[THREADS_PER_BLOCK];
__shared__ float maxRes[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
maxResIndex[threadIdx.x] = 0;
maxRes[threadIdx.x] = 0.0f;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
maxRes[threadIdx.x] = residual;
maxResIndex[threadIdx.x] = corrIdx;
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
if (maxRes[first] < maxRes[second]) {
maxRes[first] = maxRes[second];
maxResIndex[first] = maxResIndex[second];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
//printf("d_maxResidual[%d] = %g (index %d)\n", blockIdx.x, maxRes[0], maxResIndex[0]);
analysis.d_maxResidual[blockIdx.x] = maxRes[0];
analysis.d_maxResidualIndex[blockIdx.x] = maxResIndex[0];
}
}
}
extern "C" void evalMaxResidual(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
EvalMaxResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
/////////////////////////////////////////////////////////////////////////
// Eval Cost
/////////////////////////////////////////////////////////////////////////
__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0) state.d_sumResidual[0] = 0.0f;
}
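// EvalResidualDevice: sums the per-correspondence costs from evalFDevice into d_sumResidual[0]
// using a warp reduction plus one atomicAdd per warp; ResetResidualDevice above zeroes the
// accumulator before each evaluation.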
__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float residual = 0.0f;
if (x < N) {
residual = evalFDevice(x, input, state, parameters);
}
residual = warpReduce(residual);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_sumResidual[0], residual);
}
}
extern "C" float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
float residual = 0.0f;
const unsigned int N = input.numberOfCorrespondences; // Number of block variables
ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters);
EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
residual = state.getSumResidual();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return residual;
}
/////////////////////////////////////////////////////////////////////////
// Eval Linear Residual
/////////////////////////////////////////////////////////////////////////
//__global__ void SumLinearResDevice(SolverInput input, SolverState state, SolverParameters parameters)
//{
// const unsigned int N = input.numberOfImages; // Number of block variables
// const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
//
// float residual = 0.0f;
// if (x > 0 && x < N) {
// residual = dot(state.d_rRot[x], state.d_rRot[x]) + dot(state.d_rTrans[x], state.d_rTrans[x]);
// atomicAdd(state.d_sumLinResidual, residual);
// }
//}
//float EvalLinearRes(SolverInput& input, SolverState& state, SolverParameters& parameters)
//{
// float residual = 0.0f;
//
// const unsigned int N = input.numberOfImages; // Number of block variables
//
// // Do PCG step
// const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
//
// float init = 0.0f;
// cutilSafeCall(cudaMemcpy(state.d_sumLinResidual, &init, sizeof(float), cudaMemcpyHostToDevice));
//
// SumLinearResDevice << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
//#ifdef _DEBUG
// cutilSafeCall(cudaDeviceSynchronize());
// cutilCheckMsg(__FUNCTION__);
//#endif
//
// cutilSafeCall(cudaMemcpy(&residual, state.d_sumLinResidual, sizeof(float), cudaMemcpyDeviceToHost));
// return residual;
//}
/////////////////////////////////////////////////////////////////////////
// Count High Residuals
/////////////////////////////////////////////////////////////////////////
__global__ void CountHighResidualsDevice(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (corrIdx < N) {
float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters);
if (residual > parameters.verifyOptDistThresh)
atomicAdd(state.d_countHighResidual, 1);
}
}
extern "C" int countHighResiduals(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfCorrespondences; // Number of correspondences
cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int)));
CountHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters);
int count;
cutilSafeCall(cudaMemcpy(&count, state.d_countHighResidual, sizeof(int), cudaMemcpyDeviceToHost));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return count;
}
/////////////////////////////////////////////////////////////////////////
// Convergence Analysis
/////////////////////////////////////////////////////////////////////////
//uses same data store as max residual
__global__ void EvalGNConvergenceDevice(SolverInput input, SolverStateAnalysis analysis, SolverState state) //compute max of delta
{
__shared__ float maxVal[THREADS_PER_BLOCK];
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
maxVal[threadIdx.x] = 0.0f;
if (x < N)
{
if (x == 0 || input.d_validImages[x] == 0)
maxVal[threadIdx.x] = 0.0f;
else {
float3 r3 = fmaxf(fabs(state.d_deltaRot[x]), fabs(state.d_deltaTrans[x]));
float r = fmaxf(r3.x, fmaxf(r3.y, r3.z));
maxVal[threadIdx.x] = r;
}
__syncthreads();
for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) {
if (threadIdx.x < stride) {
int first = threadIdx.x;
int second = threadIdx.x + stride;
maxVal[first] = fmaxf(maxVal[first], maxVal[second]);
}
__syncthreads();
}
if (threadIdx.x == 0) {
analysis.d_maxResidual[blockIdx.x] = maxVal[0];
}
}
}
float EvalGNConvergence(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, CUDATimer* timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
const unsigned int N = input.numberOfImages;
const unsigned int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
EvalGNConvergenceDevice << < blocksPerGrid, THREADS_PER_BLOCK >> >(input, analysis, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//copy to host and compute max
cutilSafeCall(cudaMemcpy(analysis.h_maxResidual, analysis.d_maxResidual, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(analysis.h_maxResidualIndex, analysis.d_maxResidualIndex, sizeof(int) * blocksPerGrid, cudaMemcpyDeviceToHost));
float maxVal = 0.0f;
for (unsigned int i = 0; i < blocksPerGrid; i++) {
if (maxVal < analysis.h_maxResidual[i]) maxVal = analysis.h_maxResidual[i];
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return maxVal;
}
// For the naming scheme of the variables see:
// http://en.wikipedia.org/wiki/Conjugate_gradient_method
// This code is an implementation of their PCG pseudo code
template<bool useDense>
__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
float3 resRot, resTrans;
evalMinusJTFDevice<useDense>(x, input, state, parameters, resRot, resTrans); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0
state.d_rRot[x] = resRot; // store for next iteration
state.d_rTrans[x] = resTrans; // store for next iteration
const float3 pRot = state.d_precondionerRot[x] * resRot; // apply preconditioner M^-1
state.d_pRot[x] = pRot;
const float3 pTrans = state.d_precondionerTrans[x] * resTrans; // apply preconditioner M^-1
state.d_pTrans[x] = pTrans;
d = dot(resRot, pRot) + dot(resTrans, pTrans); // x-th term of the numerator for computing alpha and of the denominator for computing beta
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
{
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0]; // store result for next kernel call
}
void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
{
const unsigned int N = input.numberOfImages;
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Initialization");
//!!!DEBUGGING //remember to uncomment the delete...
//float3* rRot = new float3[input.numberOfImages]; // -jtf
//float3* rTrans = new float3[input.numberOfImages];
//!!!DEBUGGING
cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (parameters.useDense) PCGInit_Kernel1<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
else PCGInit_Kernel1<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//cutilSafeCall(cudaMemcpy(rRot, state.d_rRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(rTrans, state.d_rTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr rRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr rTrans %d\n", i); getchar(); } }
//cutilSafeCall(cudaMemcpy(rRot, state.d_pRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(rTrans, state.d_pTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr pRot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr pTrans %d\n", i); getchar(); } }
PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
//float scanAlpha;
//cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
//if (rRot) delete[] rRot;
//if (rTrans) delete[] rTrans;
}
/////////////////////////////////////////////////////////////////////////
// PCG Iteration Parts
/////////////////////////////////////////////////////////////////////////
//inefficient
__global__ void PCGStep_Kernel_Dense_Brute(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseBruteDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans); // A x p_k => J^T x J x p_k
state.d_Ap_XRot[x] += rot;
state.d_Ap_XTrans[x] += trans;
}
}
__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans, threadIdx.x); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel0(SolverInput input, SolverState state, SolverParameters parameters)
{
	const unsigned int N = input.numberOfCorrespondences; // Number of correspondences (sparse residual terms)
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N)
{
		const float3 tmp = applyJDevice(x, input, state, parameters);		// J x p_k (first half of A x p_k = J^T x J x p_k)
state.d_Jp[x] = tmp; // store for next kernel call
}
}
__global__ void PCGStep_Kernel1a(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x;
const unsigned int lane = threadIdx.x % WARP_SIZE;
if (x > 0 && x < N)
{
float3 rot, trans;
applyJTDevice(x, input, state, parameters, rot, trans, threadIdx.x, lane); // A x p_k => J^T x J x p_k
if (lane == 0)
{
atomicAdd(&state.d_Ap_XRot[x].x, rot.x);
atomicAdd(&state.d_Ap_XRot[x].y, rot.y);
atomicAdd(&state.d_Ap_XRot[x].z, rot.z);
atomicAdd(&state.d_Ap_XTrans[x].x, trans.x);
atomicAdd(&state.d_Ap_XTrans[x].y, trans.y);
atomicAdd(&state.d_Ap_XTrans[x].z, trans.z);
}
}
}
__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters)
{
const unsigned int N = input.numberOfImages; // Number of block variables
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float d = 0.0f;
if (x > 0 && x < N)
{
d = dot(state.d_pRot[x], state.d_Ap_XRot[x]) + dot(state.d_pTrans[x], state.d_Ap_XTrans[x]); // x-th term of denominator of alpha
}
d = warpReduce(d);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(state.d_scanAlpha, d);
}
}
__global__ void PCGStep_Kernel2(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
const float dotProduct = state.d_scanAlpha[0];
float b = 0.0f;
if (x > 0 && x < N)
{
float alpha = 0.0f;
if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha
		state.d_deltaRot[x] = state.d_deltaRot[x] + alpha*state.d_pRot[x];		// take a descent step
		state.d_deltaTrans[x] = state.d_deltaTrans[x] + alpha*state.d_pTrans[x];	// take a descent step
float3 rRot = state.d_rRot[x] - alpha*state.d_Ap_XRot[x]; // update residuum
state.d_rRot[x] = rRot; // store for next kernel call
float3 rTrans = state.d_rTrans[x] - alpha*state.d_Ap_XTrans[x]; // update residuum
state.d_rTrans[x] = rTrans; // store for next kernel call
float3 zRot = state.d_precondionerRot[x] * rRot; // apply preconditioner M^-1
state.d_zRot[x] = zRot; // save for next kernel call
float3 zTrans = state.d_precondionerTrans[x] * rTrans; // apply preconditioner M^-1
state.d_zTrans[x] = zTrans; // save for next kernel call
		b = dot(zRot, rRot) + dot(zTrans, rTrans);		// compute x-th term of the numerator of beta
}
b = warpReduce(b);
if (threadIdx.x % WARP_SIZE == 0)
{
atomicAdd(&state.d_scanAlpha[1], b);
}
}
template<bool lastIteration>
__global__ void PCGStep_Kernel3(SolverInput input, SolverState state)
{
const unsigned int N = input.numberOfImages;
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x > 0 && x < N)
{
		const float rDotzNew = state.d_scanAlpha[1];				// get new numerator
const float rDotzOld = state.d_rDotzOld[x]; // get old denominator
float beta = 0.0f;
if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta
state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration
		state.d_pRot[x] = state.d_zRot[x] + beta*state.d_pRot[x];		// update descent direction
		state.d_pTrans[x] = state.d_zTrans[x] + beta*state.d_pTrans[x];	// update descent direction
state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
if (lastIteration)
{
//if (input.d_validImages[x]) { //not really necessary
#ifdef USE_LIE_SPACE //TODO just keep that matrix transforms around
float3 rot, trans;
computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x], rot, trans);
state.d_xRot[x] = rot;
state.d_xTrans[x] = trans;
#else
state.d_xRot[x] = state.d_xRot[x] + state.d_deltaRot[x];
state.d_xTrans[x] = state.d_xTrans[x] + state.d_deltaTrans[x];
#endif
//}
}
}
}
template<bool useSparse, bool useDense>
bool PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, bool lastIteration, CUDATimer *timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Full PCGIteration");
const unsigned int N = input.numberOfImages; // Number of block variables
// Do PCG step
const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (blocksPerGrid > THREADS_PER_BLOCK)
{
std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
while (1);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Memset scanalpha");
cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float) * 2));
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
// sparse part
if (useSparse) {
const unsigned int Ncorr = input.numberOfCorrespondences;
const int blocksPerGridCorr = (Ncorr + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Sparse J)");
PCGStep_Kernel0 << <blocksPerGridCorr, THREADS_PER_BLOCK >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Sparse Jt)");
PCGStep_Kernel1a << < N, THREADS_PER_BLOCK_JT >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
if (useDense) {
//if (timer) timer->startEvent("apply JTJ dense");
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Dense JtJ)");
PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
//PCGStep_Kernel_Dense_Brute << < N, 1 >> >(input, state, parameters);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
//if (timer) timer->endEvent();
}
//!!!debugging
//float3* Ap_Rot = new float3[input.numberOfImages];
//float3* Ap_Trans = new float3[input.numberOfImages];
//cutilSafeCall(cudaMemcpy(Ap_Rot, state.d_Ap_XRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(Ap_Trans, state.d_Ap_XTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Rot[i].x)) { printf("NaN at Ap rot %d\n", i); getchar(); } }
//for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Trans[i].x)) { printf("NaN at Ap trans %d\n", i); getchar(); } }
//if (Ap_Rot) delete[] Ap_Rot;
//if (Ap_Trans) delete[] Ap_Trans;
//!!!debugging
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep1 (Finish)");
PCGStep_Kernel1b << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep2");
PCGStep_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
#ifdef ENABLE_EARLY_OUT //for convergence
float scanAlpha; cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
//if (fabs(scanAlpha) < 0.00005f) lastIteration = true; //todo check this part
//if (fabs(scanAlpha) < 1e-6) lastIteration = true; //todo check this part
if (fabs(scanAlpha) < 5e-7) { lastIteration = true; } //todo check this part
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("PCGStep3");
if (lastIteration) {
PCGStep_Kernel3<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
else {
PCGStep_Kernel3<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state);
}
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
return lastIteration;
}
#ifdef USE_LIE_SPACE //TODO
////////////////////////////////////////////////////////////////////
// matrix <-> pose
////////////////////////////////////////////////////////////////////
__global__ void convertLiePosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numTransforms) {
poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]);
d_transformInvs[idx] = d_transforms[idx].getInverse();
}
}
extern "C"
void convertLiePosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs, CUDATimer *timer)
{
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent("Lie -> Matrix");
convertLiePosesToMatricesCU_Kernel << <(numTransforms + 8 - 1) / 8, 8 >> >(d_rot, d_trans, numTransforms, d_transforms, d_transformInvs);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
#endif
////////////////////////////////////////////////////////////////////
// Main GN Solver Loop
////////////////////////////////////////////////////////////////////
extern "C" void solveBundlingStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, float* convergenceAnalysis, CUDATimer *timer, bool dumpInputOutput)
{
if (convergenceAnalysis) {
float initialResidual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[0] = initialResidual; // initial residual
}
float initResidual, endResidual;
if (input.numberOfImages > 11) {
initResidual = EvalResidual(input, state, parameters, timer);
}
//!!!DEBUGGING
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
if (input.numberOfCorrespondences == 0) { printf("ERROR: %d correspondences\n", input.numberOfCorrespondences); getchar(); }
float initialResidual = EvalResidual(input, state, parameters, timer);
printf("initial sparse = %g*%g = %g\n", parameters.weightSparse, initialResidual / parameters.weightSparse, initialResidual);
}
#endif
//float3* xRot = new float3[input.numberOfImages]; //remember the delete!
//float3* xTrans = new float3[input.numberOfImages];
//timer = new CUDATimer();
//static unsigned int totalLinIters = 0, numLin = 0, totalNonLinIters = 0, numNonLin = 0;
//!!!DEBUGGING
if(timer) timer->startEvent("Total");
for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++)
{
if (timer) timer->startEvent("Nonlinear Iteration");
if (timer) timer->startEvent("Nonlinear Setup");
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses, timer);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
Initialization(input, state, parameters, timer);
if (timer) timer->endEvent();
if (timer) timer->startEvent("Linear Solve");
if (parameters.weightSparse > 0.0f) {
if (parameters.useDense) {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
break;
}
} else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<true, false>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
//totalLinIters += (linIter+1); numLin++;
break;
}
}
} else {
for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++)
if (PCGIteration<false, true>(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) {
break;
}
}
if (timer) timer->endEvent();
if (timer) timer->startEvent("Nonlinear Finish");
//!!!debugging
//cutilSafeCall(cudaMemcpy(xRot, state.d_xRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(xTrans, state.d_xTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
//!!!debugging
#ifdef PRINT_RESIDUALS_SPARSE
if (parameters.weightSparse > 0) {
float residual = EvalResidual(input, state, parameters, timer);
printf("[niter %d] weight * sparse = %g*%g = %g\t[#corr = %d]\n", nIter, parameters.weightSparse, residual / parameters.weightSparse, residual, input.numberOfCorrespondences);
}
#endif
if (convergenceAnalysis) {
float residual = EvalResidual(input, state, parameters, timer);
convergenceAnalysis[nIter + 1] = residual;
}
if (timer) timer->endEvent();
if (timer) timer->endEvent();
#ifdef ENABLE_EARLY_OUT //convergence
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.01f) { //!!! TODO CHECK HOW THESE GENERALIZE
if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.005f) { //0.001?
//if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.001f) {
//if (!parameters.useDense) { totalNonLinIters += (nIter+1); numNonLin++; }
break;
}
//else if (!parameters.useDense && nIter == parameters.nNonLinearIterations - 1) { totalNonLinIters += (nIter+1); numNonLin++; }
#endif
}
#ifdef PRINT_RESIDUALS_DENSE
// So we can get a final result.
unsigned int nIter = parameters.nNonLinearIterations - 1;
parameters.weightSparse = input.weightsSparse[nIter];
parameters.weightDenseDepth = input.weightsDenseDepth[nIter];
parameters.weightDenseColor = input.weightsDenseColor[nIter];
parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0);
#ifdef USE_LIE_SPACE
convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses, timer);
#endif
if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found
#endif
if (timer) timer->endEvent();
if (input.numberOfImages > 11) {
endResidual = EvalResidual(input, state, parameters, timer);
if (initResidual > 200 * endResidual) {
printf("Large residual decrease in global solve!: %g -> %g; %g\n", initResidual, endResidual, initResidual/endResidual);
}
}
//!!!debugging
//if (xRot) delete[] xRot;
//if (xTrans) delete[] xTrans;
//if (timer) { timer->evaluate(true, false); delete timer; }
//if (!parameters.useDense) { printf("mean #pcg its = %f\tmean #gn its = %f\n", (float)totalLinIters / (float)numLin, (float)totalNonLinIters / (float)numNonLin); } //just stats for global solve
//!!!debugging
}
////////////////////////////////////////////////////////////////////
// build variables to correspondences lookup
////////////////////////////////////////////////////////////////////
__global__ void BuildVariablesToCorrespondencesTableDevice(EntryJ* d_correspondences, unsigned int numberOfCorrespondences,
unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow)
{
	const unsigned int N = numberOfCorrespondences; // Number of correspondences
const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < N) {
EntryJ& corr = d_correspondences[x];
if (corr.isValid()) {
int offset0 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_i], 1); // may overflow - need to check when read
int offset1 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_j], 1); // may overflow - need to check when read
if (offset0 < maxNumCorrespondencesPerImage && offset1 < maxNumCorrespondencesPerImage) {
d_variablesToCorrespondences[corr.imgIdx_i * maxNumCorrespondencesPerImage + offset0] = x;
d_variablesToCorrespondences[corr.imgIdx_j * maxNumCorrespondencesPerImage + offset1] = x;
}
else { //invalidate
printf("EXCEEDED MAX NUM CORR PER IMAGE IN SOLVER, INVALIDATING %d(%d,%d) [%d,%d | %d]\n",
x, corr.imgIdx_i, corr.imgIdx_j, offset0, offset1, maxNumCorrespondencesPerImage); //debugging
corr.setInvalid(); //make sure j corresponds to jt
}
}
}
}
extern "C" void buildVariablesToCorrespondencesTableCUDA(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow, CUDATimer* timer)
{
const unsigned int N = numberOfCorrespondences;
if (timer && TIME_INDIVIDUAL_STAGES) timer->startEvent(__FUNCTION__);
BuildVariablesToCorrespondencesTableDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_correspondences, numberOfCorrespondences, maxNumCorrespondencesPerImage, d_variablesToCorrespondences, d_numEntriesPerRow);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
if (timer && TIME_INDIVIDUAL_STAGES) timer->endEvent();
}
|
6cbcf09e539b3240295a36acf3ed410e92da7936.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
	Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "RBMconfig.h"
#include "../reduction/SumWarp.h"
namespace GPUMLib {
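// Applies one weight update with momentum: the effective momentum is momentum * learningRate clamped to
// [0.1, 0.9]; if the resulting weight would be Inf/NaN the weight is left unchanged and the stored deltas
// are reset, otherwise the new weight and the momentum-augmented delta are stored for the next step.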
__device__ __forceinline__ void UpdateWeight(cudafloat learningRate, cudafloat momentum, cudafloat delta, cudafloat * lastDelta, cudafloat * lastDeltaWithoutLearningMomentum, cudafloat * weights, int w) {
momentum *= learningRate;
if (momentum < CUDA_VALUE(0.1)) momentum = CUDA_VALUE(0.1);
if (momentum > CUDA_VALUE(0.9)) momentum = CUDA_VALUE(0.9);
cudafloat neww = weights[w] + learningRate * delta + momentum * lastDelta[w];
delta += momentum * lastDelta[w];
if (IsInfOrNaN(neww)) {
delta = CUDA_VALUE(0.0);
lastDeltaWithoutLearningMomentum[w] = CUDA_VALUE(0.0);
} else {
weights[w] = neww;
}
lastDelta[w] = delta;
}
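// Per-weight adaptive step size in the spirit of Rprop / delta-bar-delta: the rate is multiplied by u while
// the current raw delta keeps the sign of the previous one (SAME_DIRECTION) and by d on a sign change,
// capped at MAX_STEP_SIZE; the raw, momentum-free delta is saved for the next comparison.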
__device__ __forceinline__ cudafloat UpdateLearningRate(cudafloat * lr, cudafloat * lastDeltaWithoutLearningMomentum, cudafloat delta, int w, cudafloat u, cudafloat d) {
cudafloat learningRate = lr[w];
learningRate *= (SAME_DIRECTION(lastDeltaWithoutLearningMomentum[w], delta) ? u : d);
if (learningRate > MAX_STEP_SIZE) learningRate = MAX_STEP_SIZE;
lr[w] = learningRate;
lastDeltaWithoutLearningMomentum[w] = delta;
return learningRate;
}
KERNEL CorrectWeightsRBM(cudafloat * v_data, cudafloat * h_data, cudafloat * v_recon, cudafloat * h_recon, int samples, cudafloat * learningRateW, cudafloat * lastDeltaWithoutLearningMomentumW, cudafloat * lastDeltaW, cudafloat * learningRateB, cudafloat * lastDeltaWithoutLearningMomentumB, cudafloat * lastDeltaB, cudafloat * learningRateA, cudafloat * lastDeltaWithoutLearningMomentumA, cudafloat * lastDeltaA, cudafloat u, cudafloat d, cudafloat momentum, cudafloat * weights, cudafloat * b, cudafloat * a, cudafloat * errors, int I, int J) {
__shared__ cudafloat vd[16];
__shared__ cudafloat vr[16];
__shared__ cudafloat hd[16];
__shared__ cudafloat hr[16];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
cudafloat error = CUDA_VALUE(0.0);
cudafloat deltaW = CUDA_VALUE(0.0);
cudafloat deltaB = CUDA_VALUE(0.0);
cudafloat deltaA = CUDA_VALUE(0.0);
for (int s = 0; s < samples; s++) {
if (threadIdx.y == 0 && i < I) {
cudafloat dat = v_data[s * I + i];
cudafloat rec = v_recon[s * I + i];
vd[threadIdx.x] = dat;
vr[threadIdx.x] = rec;
cudafloat e = dat - rec;
deltaA += e;
error += e * e;
}
if (threadIdx.x == 0 && j < J) {
cudafloat dat = h_data[s * J + j];
cudafloat rec = h_recon[s * J + j];
hd[threadIdx.y] = dat;
hr[threadIdx.y] = rec;
deltaB += dat - rec;
}
__syncthreads();
		deltaW += vd[threadIdx.x] * hd[threadIdx.y] - vr[threadIdx.x] * hr[threadIdx.y];
		__syncthreads(); // make sure every thread has read the shared tiles before the next sample overwrites them
}
// update weights
if (i < I && j < J) {
deltaW /= samples;
int w = j * I + i;
cudafloat learningRate = UpdateLearningRate(learningRateW, lastDeltaWithoutLearningMomentumW, deltaW, w, u, d);
UpdateWeight(learningRate, momentum, deltaW, lastDeltaW, lastDeltaWithoutLearningMomentumW, weights, w);
}
if(i < I && threadIdx.y == 0) {
errors[i] = error;
// Update a
if (j == 0) {
deltaA /= samples;
cudafloat learningRate = UpdateLearningRate(learningRateA, lastDeltaWithoutLearningMomentumA, deltaA, i, u, d);
UpdateWeight(learningRate, momentum, deltaA, lastDeltaA, lastDeltaWithoutLearningMomentumA, a, i);
}
}
// Update b
if (i == 0 && j < J) {
deltaB /= samples;
cudafloat learningRate = UpdateLearningRate(learningRateB, lastDeltaWithoutLearningMomentumB, deltaB, j, u, d);
UpdateWeight(learningRate, momentum, deltaB, lastDeltaB, lastDeltaWithoutLearningMomentumB, b, j);
}
}
} | 6cbcf09e539b3240295a36acf3ed410e92da7936.cu | /*
Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal
	Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes
This file is part of GPUMLib.
GPUMLib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "RBMconfig.h"
#include "../reduction/SumWarp.h"
namespace GPUMLib {
__device__ __forceinline__ void UpdateWeight(cudafloat learningRate, cudafloat momentum, cudafloat delta, cudafloat * lastDelta, cudafloat * lastDeltaWithoutLearningMomentum, cudafloat * weights, int w) {
momentum *= learningRate;
if (momentum < CUDA_VALUE(0.1)) momentum = CUDA_VALUE(0.1);
if (momentum > CUDA_VALUE(0.9)) momentum = CUDA_VALUE(0.9);
cudafloat neww = weights[w] + learningRate * delta + momentum * lastDelta[w];
delta += momentum * lastDelta[w];
if (IsInfOrNaN(neww)) {
delta = CUDA_VALUE(0.0);
lastDeltaWithoutLearningMomentum[w] = CUDA_VALUE(0.0);
} else {
weights[w] = neww;
}
lastDelta[w] = delta;
}
__device__ __forceinline__ cudafloat UpdateLearningRate(cudafloat * lr, cudafloat * lastDeltaWithoutLearningMomentum, cudafloat delta, int w, cudafloat u, cudafloat d) {
cudafloat learningRate = lr[w];
learningRate *= (SAME_DIRECTION(lastDeltaWithoutLearningMomentum[w], delta) ? u : d);
if (learningRate > MAX_STEP_SIZE) learningRate = MAX_STEP_SIZE;
lr[w] = learningRate;
lastDeltaWithoutLearningMomentum[w] = delta;
return learningRate;
}
KERNEL CorrectWeightsRBM(cudafloat * v_data, cudafloat * h_data, cudafloat * v_recon, cudafloat * h_recon, int samples, cudafloat * learningRateW, cudafloat * lastDeltaWithoutLearningMomentumW, cudafloat * lastDeltaW, cudafloat * learningRateB, cudafloat * lastDeltaWithoutLearningMomentumB, cudafloat * lastDeltaB, cudafloat * learningRateA, cudafloat * lastDeltaWithoutLearningMomentumA, cudafloat * lastDeltaA, cudafloat u, cudafloat d, cudafloat momentum, cudafloat * weights, cudafloat * b, cudafloat * a, cudafloat * errors, int I, int J) {
__shared__ cudafloat vd[16];
__shared__ cudafloat vr[16];
__shared__ cudafloat hd[16];
__shared__ cudafloat hr[16];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
cudafloat error = CUDA_VALUE(0.0);
cudafloat deltaW = CUDA_VALUE(0.0);
cudafloat deltaB = CUDA_VALUE(0.0);
cudafloat deltaA = CUDA_VALUE(0.0);
for (int s = 0; s < samples; s++) {
if (threadIdx.y == 0 && i < I) {
cudafloat dat = v_data[s * I + i];
cudafloat rec = v_recon[s * I + i];
vd[threadIdx.x] = dat;
vr[threadIdx.x] = rec;
cudafloat e = dat - rec;
deltaA += e;
error += e * e;
}
if (threadIdx.x == 0 && j < J) {
cudafloat dat = h_data[s * J + j];
cudafloat rec = h_recon[s * J + j];
hd[threadIdx.y] = dat;
hr[threadIdx.y] = rec;
deltaB += dat - rec;
}
__syncthreads();
		deltaW += vd[threadIdx.x] * hd[threadIdx.y] - vr[threadIdx.x] * hr[threadIdx.y];
		__syncthreads(); // make sure every thread has read the shared tiles before the next sample overwrites them
}
// update weights
if (i < I && j < J) {
deltaW /= samples;
int w = j * I + i;
cudafloat learningRate = UpdateLearningRate(learningRateW, lastDeltaWithoutLearningMomentumW, deltaW, w, u, d);
UpdateWeight(learningRate, momentum, deltaW, lastDeltaW, lastDeltaWithoutLearningMomentumW, weights, w);
}
if(i < I && threadIdx.y == 0) {
errors[i] = error;
// Update a
if (j == 0) {
deltaA /= samples;
cudafloat learningRate = UpdateLearningRate(learningRateA, lastDeltaWithoutLearningMomentumA, deltaA, i, u, d);
UpdateWeight(learningRate, momentum, deltaA, lastDeltaA, lastDeltaWithoutLearningMomentumA, a, i);
}
}
// Update b
if (i == 0 && j < J) {
deltaB /= samples;
cudafloat learningRate = UpdateLearningRate(learningRateB, lastDeltaWithoutLearningMomentumB, deltaB, j, u, d);
UpdateWeight(learningRate, momentum, deltaB, lastDeltaB, lastDeltaWithoutLearningMomentumB, b, j);
}
}
} |
498ea1dfce82a28cecfa033ccaa0c3f1b25fea2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
__global__ void GaussianFilter(unsigned char *img_in, unsigned char *img_out, int width, int height, int channels, int *gaussian_filter)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
int gaussianSum, min_row, max_row, min_col, max_col, g_x, g_y, imageIndex;
for (int channel = 0; channel < channels; channel++)
{
imageIndex = (row * width + col) * channels + channel;
gaussianSum = 0;
min_row = row - GAUSSIAN_DIM / 2 > 0 ? row - GAUSSIAN_DIM / 2 : 0;
max_row = (row + GAUSSIAN_DIM / 2 + 1) < height ? (row + GAUSSIAN_DIM / 2 + 1) : height;
min_col = col - GAUSSIAN_DIM / 2 > 0 ? col - GAUSSIAN_DIM / 2 : 0;
max_col = (col + GAUSSIAN_DIM / 2 + 1) < width ? (col + GAUSSIAN_DIM / 2 + 1) : width;
g_x = 0;
for (int offX = min_row; offX < max_row; offX++)
{
g_y = 0;
for (int offY = min_col; offY < max_col; offY++)
{
gaussianSum += img_in[(offX * width + offY) * channels + channel] * gaussian_filter[g_x * GAUSSIAN_DIM + g_y];
g_y++;
}
g_x++;
}
img_out[imageIndex] = gaussianSum / 273;
}
}
}
__global__ void RGB2GRAY(unsigned char *img_in, unsigned char *img_out, int width, int height, int channels)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
int index = (row * width + col) * channels;
img_out[row * width + col] = img_in[index] / 3 + img_in[index + 1] / 3 + img_in[index + 2] / 3;
}
}
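// Initializes the level-set function with a radially symmetric profile centred on the image:
// 900 / (900 + x^2 + y^2) - 0.5 is positive for x^2 + y^2 < 900 (a disk of radius 30 pixels around the
// centre), zero on that circle and tends towards -0.5 far away, so the initial contour is that circle.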
__global__ void InitContour(double *contour, int width, int height)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
        double x = double(row) - height / 2.0;
        double y = double(col) - width / 2.0;
contour[row * width + col] = 900.0 / (900.0 + x * x + y * y) - 0.5; //radius/(radius + x*x + y*y) - 0.5;
}
}
void Preprocess_kernel(Image &img_in, Image &img_out)
{
img_out.channels = img_in.channels;
img_out.height = img_in.height;
img_out.width = img_in.width;
img_out.size = img_in.size;
img_out.contour0 = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.contour = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.contourOld = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.img = (unsigned char *)malloc(sizeof(unsigned char) * img_out.size);
memcpy(img_out.img, img_in.img, sizeof(unsigned char) * img_in.size);
int size = img_in.height * img_in.width * img_in.channels;
unsigned char *d_img_in, *d_img_out, *d_img_flatten;
double *d_img_contour;
int *gaussian_filter, *d_gaussian_filter;
gaussian_filter = (int *)malloc(sizeof(int) * GAUSSIAN_DIM * GAUSSIAN_DIM);
for (int i = 0; i < GAUSSIAN_DIM; i++)
{
for (int j = 0; j < GAUSSIAN_DIM; j++)
{
gaussian_filter[i * GAUSSIAN_DIM + j] = GAUSSIAN[i][j];
}
}
hipMalloc((void **)&d_img_in, size * sizeof(unsigned char));
hipMalloc((void **)&d_img_out, size * sizeof(unsigned char));
hipMalloc((void **)&d_img_flatten, img_in.height * img_in.width * sizeof(unsigned char));
hipMalloc((void **)&d_img_contour, img_in.height * img_in.width * sizeof(double));
hipMalloc((void **)&d_gaussian_filter, GAUSSIAN_DIM * GAUSSIAN_DIM * sizeof(int));
hipMemcpy(d_img_in, img_in.img, size * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(d_gaussian_filter, gaussian_filter, GAUSSIAN_DIM * GAUSSIAN_DIM * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_img_out, img_out.img, size * sizeof(unsigned char), hipMemcpyHostToDevice);
/*************** Kernel calls**************/
dim3 grid, block;
block.x = BLOCK_SIZE_X;
block.y = BLOCK_SIZE_Y;
grid.x = (img_in.width % block.x == 0) ? img_in.width / block.x : img_in.width / block.x + 1;
grid.y = (img_in.height % block.y == 0) ? img_in.height / block.y : img_in.height / block.y + 1;
//kernel 1
hipLaunchKernelGGL(( GaussianFilter), dim3(grid), dim3(block), 0, 0, d_img_in, d_img_out, img_in.width, img_in.height, img_in.channels, d_gaussian_filter);
// kernel 2
if (img_out.channels > 1)
hipLaunchKernelGGL(( RGB2GRAY), dim3(grid), dim3(block), 0, 0, d_img_out, d_img_flatten, img_in.width, img_in.height, img_in.channels);
//kernel 3
hipLaunchKernelGGL(( InitContour), dim3(grid), dim3(block), 0, 0, d_img_contour, img_in.width, img_in.height);
hipDeviceSynchronize();
img_out.channels = 1;
img_out.size = img_out.height * img_out.width;
hipMemcpy(img_out.img, d_img_flatten, img_out.size * sizeof(unsigned char), hipMemcpyDeviceToHost);
img_out.img = (unsigned char *)realloc(img_out.img, sizeof(unsigned char) * img_out.size);
hipMemcpy(img_out.contour0, d_img_contour, img_out.size * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_img_in);
hipFree(d_img_out);
hipFree(d_img_flatten);
hipFree(d_img_contour);
hipFree(d_gaussian_filter);
free(gaussian_filter);
}
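// Accumulates the numerators and denominators of the two Chan-Vese region means: H_phi is a smoothed
// Heaviside of the level set, n1/d1 collect sum(I * H_phi) and sum(H_phi) for the region inside the
// contour, and n2/d2 the corresponding sums weighted by (1 - H_phi) outside; ChanVeseCoreKernel later
// reads them as avgIntensity[0..3] and forms c1 = n1 / d1 and c2 = n2 / d2.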
__global__ void GetAverageIntensityOfRegionsKernel(Image img, double *n1, double *n2, double *d1, double *d2)
{
// Calculate threadId and perform computation
int tidX = threadIdx.x + blockIdx.x * blockDim.x;
int tidY = threadIdx.y + blockIdx.y * blockDim.y;
int gIndex = tidY * img.width + tidX;
if (gIndex >= img.size)
return;
double H_phi = 0.5 * (1 + (2 / PI) * atan(img.contour[gIndex] / H));
atomicAdd(n1, ((double)img.img[gIndex] * H_phi));
atomicAdd(d1, H_phi);
atomicAdd(n2, ((double)img.img[gIndex]) * (1 - H_phi));
atomicAdd(d2, 1 - H_phi);
}
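// Note: this overload appears intended as a fast square; it ignores the exponent and always returns x * x.
// The kernels below only call it with exponent 2 (or with base L == 1 in pow(L, P - 1)), so the result
// matches the real pow.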
__device__ double pow(double x, int p)
{
return x * x;
}
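// One semi-implicit update step of the Chan-Vese level set: C1..C4 are the 1/|grad phi| coefficients at the
// four half-grid neighbours (regularised by EPSILON), delPhi is the smoothed delta function
// H / (pi * (H^2 + phi^2)), and the new phi value is a weighted average of the four neighbours
// (weights Multiple * Ci) plus F times the explicit part, which carries the area term NU and the region
// data terms lambda1 * (I - c1)^2 - lambda2 * (I - c2)^2.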
__global__ void ChanVeseCoreKernel(Image img, double *avgIntensity)
{
    // Identify thread indices
int i = threadIdx.y + blockIdx.y * blockDim.y; // i
int j = threadIdx.x + blockIdx.x * blockDim.x; // j
// int gIndex = i * img.width + j;
// Image boundary safety
if (j < 1 || j >= img.width - 1 || i < 1 || i >= img.height - 1)
return;
// Calculate Avg Intensity of Regions
double c1 = avgIntensity[0] / avgIntensity[2];
double c2 = avgIntensity[1] / avgIntensity[3];
// if (i == 1 && j == 1)
// printf("avg-Intensity: %f %f\n", c1, c2);
double i_j = img.contour0[i * img.width + j];
double iPlus_j = img.contour0[(i + 1) * img.width + j];
double i_jPlus = img.contour0[i * img.width + j + 1];
double i_jMinus = img.contour0[i * img.width + j - 1];
double iMinus_j = img.contour0[(i - 1) * img.width + j];
double iMinus_jPlus = img.contour0[(i - 1) * img.width + j + 1];
double iMinus_jMinus = img.contour0[(i - 1) * img.width + j - 1];
double iPlus_jMinus = img.contour0[(i + 1) * img.width + j - 1];
double L = 1;
double C1 = 1 / sqrt(EPSILON +
pow((iPlus_j - i_j), 2) +
pow((i_jPlus - i_jMinus), 2) / 4);
double C2 = 1 / sqrt(EPSILON +
pow((i_j - iMinus_j), 2) +
pow((iMinus_jPlus - iMinus_jMinus), 2) / 4);
double C3 = 1 / sqrt(EPSILON +
pow((iPlus_j - iMinus_j), 2) / 4.0 +
pow((i_jPlus - i_j), 2));
double C4 = 1 / sqrt(EPSILON +
pow((iPlus_jMinus - iMinus_jMinus), 2) / 4.0 +
pow((iPlus_j - iPlus_jMinus), 2));
double delPhi = H / (PI * (H * H + (i_j) * (i_j)));
double Multiple = DT * delPhi * MU * (double(P) * pow(L, P - 1));
double F = H / (H + Multiple * (C1 + C2 + C3 + C4));
Multiple = Multiple / (H + Multiple * (C1 + C2 + C3 + C4));
// double F1 = Multiple * C1;
// double F2 = Multiple * C2;
// double F3 = Multiple * C3;
// double F4 = Multiple * C4;
double CurrPixel = i_j - DT * delPhi * (NU + lambda1 * pow(img.img[i * img.width + j] - c1, 2) - lambda2 * pow(img.img[i * img.width + j] - c2, 2));
img.contour[i * img.width + j] = Multiple * C1 * iPlus_j +
Multiple * C2 * iMinus_j +
Multiple * C3 * i_jPlus +
Multiple * C4 * i_jMinus + F * CurrPixel;
}
void GetAverageIntensityOfRegions(dim3 grid, dim3 block, Image d_img, double *avgIntensity)
{
hipLaunchKernelGGL(( GetAverageIntensityOfRegionsKernel), dim3(grid), dim3(block), 0, 0, d_img, avgIntensity, avgIntensity + 1, avgIntensity + 2, avgIntensity + 3);
}
void ChanVeseCore(dim3 grid, dim3 block, Image &img, double *avgIntensity)
{
hipLaunchKernelGGL(( ChanVeseCoreKernel), dim3(grid), dim3(block), 0, 0, img, avgIntensity);
}
| 498ea1dfce82a28cecfa033ccaa0c3f1b25fea2b.cu |
#include "common.h"
__global__ void GaussianFilter(unsigned char *img_in, unsigned char *img_out, int width, int height, int channels, int *gaussian_filter)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
int gaussianSum, min_row, max_row, min_col, max_col, g_x, g_y, imageIndex;
for (int channel = 0; channel < channels; channel++)
{
imageIndex = (row * width + col) * channels + channel;
gaussianSum = 0;
min_row = row - GAUSSIAN_DIM / 2 > 0 ? row - GAUSSIAN_DIM / 2 : 0;
max_row = (row + GAUSSIAN_DIM / 2 + 1) < height ? (row + GAUSSIAN_DIM / 2 + 1) : height;
min_col = col - GAUSSIAN_DIM / 2 > 0 ? col - GAUSSIAN_DIM / 2 : 0;
max_col = (col + GAUSSIAN_DIM / 2 + 1) < width ? (col + GAUSSIAN_DIM / 2 + 1) : width;
g_x = 0;
for (int offX = min_row; offX < max_row; offX++)
{
g_y = 0;
for (int offY = min_col; offY < max_col; offY++)
{
gaussianSum += img_in[(offX * width + offY) * channels + channel] * gaussian_filter[g_x * GAUSSIAN_DIM + g_y];
g_y++;
}
g_x++;
}
img_out[imageIndex] = gaussianSum / 273;
}
}
}
__global__ void RGB2GRAY(unsigned char *img_in, unsigned char *img_out, int width, int height, int channels)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
int index = (row * width + col) * channels;
img_out[row * width + col] = img_in[index] / 3 + img_in[index + 1] / 3 + img_in[index + 2] / 3;
}
}
__global__ void InitContour(double *contour, int width, int height)
{
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
if (col < width && row < height)
{
        double x = double(row) - height / 2.0;
        double y = double(col) - width / 2.0;
contour[row * width + col] = 900.0 / (900.0 + x * x + y * y) - 0.5; //radius/(radius + x*x + y*y) - 0.5;
}
}
void Preprocess_kernel(Image &img_in, Image &img_out)
{
img_out.channels = img_in.channels;
img_out.height = img_in.height;
img_out.width = img_in.width;
img_out.size = img_in.size;
img_out.contour0 = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.contour = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.contourOld = (double *)malloc(sizeof(double) * img_out.width * img_out.height);
img_out.img = (unsigned char *)malloc(sizeof(unsigned char) * img_out.size);
memcpy(img_out.img, img_in.img, sizeof(unsigned char) * img_in.size);
int size = img_in.height * img_in.width * img_in.channels;
unsigned char *d_img_in, *d_img_out, *d_img_flatten;
double *d_img_contour;
int *gaussian_filter, *d_gaussian_filter;
gaussian_filter = (int *)malloc(sizeof(int) * GAUSSIAN_DIM * GAUSSIAN_DIM);
for (int i = 0; i < GAUSSIAN_DIM; i++)
{
for (int j = 0; j < GAUSSIAN_DIM; j++)
{
gaussian_filter[i * GAUSSIAN_DIM + j] = GAUSSIAN[i][j];
}
}
cudaMalloc((void **)&d_img_in, size * sizeof(unsigned char));
cudaMalloc((void **)&d_img_out, size * sizeof(unsigned char));
cudaMalloc((void **)&d_img_flatten, img_in.height * img_in.width * sizeof(unsigned char));
cudaMalloc((void **)&d_img_contour, img_in.height * img_in.width * sizeof(double));
cudaMalloc((void **)&d_gaussian_filter, GAUSSIAN_DIM * GAUSSIAN_DIM * sizeof(int));
cudaMemcpy(d_img_in, img_in.img, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(d_gaussian_filter, gaussian_filter, GAUSSIAN_DIM * GAUSSIAN_DIM * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_img_out, img_out.img, size * sizeof(unsigned char), cudaMemcpyHostToDevice);
/*************** Kernel calls**************/
dim3 grid, block;
block.x = BLOCK_SIZE_X;
block.y = BLOCK_SIZE_Y;
grid.x = (img_in.width % block.x == 0) ? img_in.width / block.x : img_in.width / block.x + 1;
grid.y = (img_in.height % block.y == 0) ? img_in.height / block.y : img_in.height / block.y + 1;
//kernel 1
GaussianFilter<<<grid, block>>>(d_img_in, d_img_out, img_in.width, img_in.height, img_in.channels, d_gaussian_filter);
// kernel 2
if (img_out.channels > 1)
RGB2GRAY<<<grid, block>>>(d_img_out, d_img_flatten, img_in.width, img_in.height, img_in.channels);
//kernel 3
InitContour<<<grid, block>>>(d_img_contour, img_in.width, img_in.height);
cudaDeviceSynchronize();
img_out.channels = 1;
img_out.size = img_out.height * img_out.width;
cudaMemcpy(img_out.img, d_img_flatten, img_out.size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
img_out.img = (unsigned char *)realloc(img_out.img, sizeof(unsigned char) * img_out.size);
cudaMemcpy(img_out.contour0, d_img_contour, img_out.size * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_img_in);
cudaFree(d_img_out);
cudaFree(d_img_flatten);
cudaFree(d_img_contour);
cudaFree(d_gaussian_filter);
free(gaussian_filter);
}
__global__ void GetAverageIntensityOfRegionsKernel(Image img, double *n1, double *n2, double *d1, double *d2)
{
// Calculate threadId and perform computation
int tidX = threadIdx.x + blockIdx.x * blockDim.x;
int tidY = threadIdx.y + blockIdx.y * blockDim.y;
int gIndex = tidY * img.width + tidX;
if (gIndex >= img.size)
return;
double H_phi = 0.5 * (1 + (2 / PI) * atan(img.contour[gIndex] / H));
atomicAdd(n1, ((double)img.img[gIndex] * H_phi));
atomicAdd(d1, H_phi);
atomicAdd(n2, ((double)img.img[gIndex]) * (1 - H_phi));
atomicAdd(d2, 1 - H_phi);
}
__device__ double pow(double x, int p)
{
return x * x;
}
__global__ void ChanVeseCoreKernel(Image img, double *avgIntensity)
{
    // Identify thread indices
int i = threadIdx.y + blockIdx.y * blockDim.y; // i
int j = threadIdx.x + blockIdx.x * blockDim.x; // j
// int gIndex = i * img.width + j;
// Image boundary safety
if (j < 1 || j >= img.width - 1 || i < 1 || i >= img.height - 1)
return;
// Calculate Avg Intensity of Regions
double c1 = avgIntensity[0] / avgIntensity[2];
double c2 = avgIntensity[1] / avgIntensity[3];
// if (i == 1 && j == 1)
// printf("avg-Intensity: %f %f\n", c1, c2);
double i_j = img.contour0[i * img.width + j];
double iPlus_j = img.contour0[(i + 1) * img.width + j];
double i_jPlus = img.contour0[i * img.width + j + 1];
double i_jMinus = img.contour0[i * img.width + j - 1];
double iMinus_j = img.contour0[(i - 1) * img.width + j];
double iMinus_jPlus = img.contour0[(i - 1) * img.width + j + 1];
double iMinus_jMinus = img.contour0[(i - 1) * img.width + j - 1];
double iPlus_jMinus = img.contour0[(i + 1) * img.width + j - 1];
double L = 1;
double C1 = 1 / sqrt(EPSILON +
pow((iPlus_j - i_j), 2) +
pow((i_jPlus - i_jMinus), 2) / 4);
double C2 = 1 / sqrt(EPSILON +
pow((i_j - iMinus_j), 2) +
pow((iMinus_jPlus - iMinus_jMinus), 2) / 4);
double C3 = 1 / sqrt(EPSILON +
pow((iPlus_j - iMinus_j), 2) / 4.0 +
pow((i_jPlus - i_j), 2));
double C4 = 1 / sqrt(EPSILON +
pow((iPlus_jMinus - iMinus_jMinus), 2) / 4.0 +
pow((iPlus_j - iPlus_jMinus), 2));
double delPhi = H / (PI * (H * H + (i_j) * (i_j)));
double Multiple = DT * delPhi * MU * (double(P) * pow(L, P - 1));
double F = H / (H + Multiple * (C1 + C2 + C3 + C4));
Multiple = Multiple / (H + Multiple * (C1 + C2 + C3 + C4));
// double F1 = Multiple * C1;
// double F2 = Multiple * C2;
// double F3 = Multiple * C3;
// double F4 = Multiple * C4;
double CurrPixel = i_j - DT * delPhi * (NU + lambda1 * pow(img.img[i * img.width + j] - c1, 2) - lambda2 * pow(img.img[i * img.width + j] - c2, 2));
img.contour[i * img.width + j] = Multiple * C1 * iPlus_j +
Multiple * C2 * iMinus_j +
Multiple * C3 * i_jPlus +
Multiple * C4 * i_jMinus + F * CurrPixel;
}
void GetAverageIntensityOfRegions(dim3 grid, dim3 block, Image d_img, double *avgIntensity)
{
GetAverageIntensityOfRegionsKernel<<<grid, block>>>(d_img, avgIntensity, avgIntensity + 1, avgIntensity + 2, avgIntensity + 3);
}
void ChanVeseCore(dim3 grid, dim3 block, Image &img, double *avgIntensity)
{
ChanVeseCoreKernel<<<grid, block>>>(img, avgIntensity);
}
|
Sorting.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/SortingUtils.h>
#include <c10/macros/Macros.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/MemoryOverlap.h>
#include <THH/THHDeviceUtils.cuh> // only for THCRoundUp?
#include <THH/THHNumerics.cuh>
#include <THH/THHScanUtils.cuh>
#include <THH/THHTensorMathReduce.cuh> // AddOp
#include <cassert>
#include <cstdlib>
namespace at {
namespace native {
namespace {
// Finds the rank k element, and its index, of the values along dimension dim
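// Each block processes one slice: radixSelect first determines the value of rank k within the slice, then
// the threads stride over the slice and a thread that finds an element equal to that value (NaN compares
// equal to NaN here) writes the value and its index; with duplicate elements the reported index is
// nondeterministic, as noted in kthvalue_out_cuda.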
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th highest element
index_t kValueIndex = 0;
bool foundKValue = false;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange &&
((v == kValue) ||
(THCNumerics<scalar_t>::isnan(v) &&
THCNumerics<scalar_t>::isnan(kValue)));
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
// CUDA kernel to find the median, and its index, of the values along dimension dim
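// NaN handling: the threads first count the NaNs in the slice (accumulated into shared num_nan). For
// median (ignore_nan == false) any NaN moves k to the last position so the selected value is NaN, since
// NaN is ordered as the largest value; for nanmedian (ignore_nan == true) k is the middle of the non-NaN
// elements. The index of the selected value is then found by a strided scan, as in gatherKthValue.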
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherMedian(
cuda::detail::TensorInfo<scalar_t, index_t> values,
cuda::detail::TensorInfo<int64_t, index_t> indices,
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t numInputSlices,
index_t inputWithinSliceStride,
bool ignore_nan) {
// Shared memory for the subroutine RadixSelect. Note that RadixSelect converts the
// floating point type to int with the same relative ordering.
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Finds the start offset for our slice
index_t valuesSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, values);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
index_t inputSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
scalar_t* valuesSliceStart = &values.data[valuesSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
scalar_t* inputSliceStart = &input.data[inputSliceStartIndex];
index_t nan_count = 0;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]);
nan_count += THCNumerics<scalar_t>::isnan(val) ? 1 : 0;
}
// Counts number of nan values
// This code performs a parallel sum reduction (not the most efficient code)
__shared__ int64_t num_nan;
if (threadIdx.x == 0) {
num_nan = 0;
}
__syncthreads();
if (nan_count > 0) {
atomicAdd(&num_nan, nan_count);
}
__syncthreads();
// For torch.median, if we found nan set k to last index so the computed value
// is nan, otherwise set k to the middle element of the non-nan values
index_t k = (!ignore_nan && num_nan > 0) ? inputSliceSize - 1
: (inputSliceSize - num_nan - 1) / 2;
// Find the median
scalar_t median = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k + 1,
inputSliceSize,
inputWithinSliceStride,
smem,
&median);
valuesSliceStart[0] = median;
// Find the index of the median value in the slice
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]);
if (val == median ||
(THCNumerics<scalar_t>::isnan(val) &&
THCNumerics<scalar_t>::isnan(median))) {
indicesSliceStart[0] = i;
break;
}
}
}
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(::min(
THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( gatherKthValue<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream,
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
};
struct MedianLauncher {
bool ignore_nan;
MedianLauncher(bool ignore_nan) : ignore_nan(ignore_nan) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(::min(
THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( gatherMedian<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream,
values_info,
indices_info,
self_info,
slice_size,
num_slices,
self_info.strides[collapse_self_dim],
ignore_nan);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.dim() == 0 ? 1 : self.size(dim);
zero_numel_check_dims(self, dim, "kth_value()");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
at::assert_no_overlap(self, values);
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (self.numel() != 0) {
AT_DISPATCH_INDEX_TYPES(
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices) ? ScalarType::Int : ScalarType::Long,
"kth_value_launcher", [&] {
run_launcher<scalar_t, index_t>(
values, indices, self, dim, KthValueLauncher(k));
});
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
}
std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(
values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> median_with_indices_impl(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t dim,
bool keepdim,
bool ignore_nan) {
// See note [Writing Nondeterministic Operations]
// If there are duplicate elements of a median value, the procedure for choosing which
// of the duplicates to use for the indices output is nondeterministic.
at::globalContext().alertNotDeterministic("median CUDA with indices output");
NoNamesGuard guard;
dim = at::maybe_wrap_dim(dim, self.dim());
Tensor in = self.dim() > 0 ? self.contiguous() : self.unsqueeze(0);
checkDeviceType("median", {values, indices}, self.device().type());
checkScalarType("median", {indices, "indices", 1}, kLong);
checkSameType("median", {values, "values", 0}, {self, "self", 2});
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"median() cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
std::vector<int64_t> out_shape = self.sizes().vec();
zero_numel_check_dims(self, dim, "median()");
if (self.dim() > 0) {
if (keepdim) {
out_shape[dim] = 1;
} else {
out_shape.erase(out_shape.begin() + dim);
}
}
values.resize_(out_shape);
indices.resize_(out_shape);
// Only launch kernel for non-empty tensors
if (self.numel() > 0) {
// Ensure #dim is the same for all tensors required for reduction
Tensor vals = keepdim && self.dim() > 0 ? values : values.unsqueeze(dim);
Tensor inds = keepdim && self.dim() > 0 ? indices : indices.unsqueeze(dim);
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "median_out_impl", [&] {
if (cuda::detail::canUse32BitIndexMath(vals) &&
cuda::detail::canUse32BitIndexMath(inds) &&
cuda::detail::canUse32BitIndexMath(in)) {
run_launcher<scalar_t, uint32_t>(
vals, inds, in, dim, MedianLauncher(ignore_nan));
} else {
run_launcher<scalar_t, uint64_t>(
vals, inds, in, dim, MedianLauncher(ignore_nan));
}
});
}
guard.reset();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return std::forward_as_tuple(values, indices);
}
Tensor median_impl(const Tensor& self, bool ignore_nan) {
NoNamesGuard guard;
int64_t size = self.numel();
TORCH_CHECK(size > 0, "median() input tensor cannot be empty");
// Sort input tensor to efficiently query for median element
Tensor sorted = std::get<0>(self.flatten().sort());
if (!ignore_nan) {
// For torch.median return either the middle element or nan (sorted as
// largest) if there are any
int64_t k = (size - 1) / 2;
return at::where(sorted[-1].isnan(), sorted[-1], sorted[k]);
} else {
// For torch.nanmedian return the middle element among the non-nan values
Tensor k = ((size - 1) - sorted.isnan().sum()) / 2;
return sorted[k.toType(kLong)];
}
}
} // namespace
// Mark: kthvalue
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
// See note [Writing Nondeterministic Operations]
// If there are duplicate elements of the kth value, the procedure for choosing which
// of the duplicates to use for the indices output is nondeterministic.
at::globalContext().alertNotDeterministic("kthvalue CUDA");
auto result = [&]() {
NoNamesGuard guard;
// `kthvalue_out_impl_cuda` expects a contiguous input `self`.
return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim);
}();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return result;
}
// Mark: median
std::tuple<Tensor&, Tensor&> median_out_cuda(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
return median_with_indices_impl(
values, indices, self, dim, keepdim, /*ignore_nan=*/false);
}
Tensor median_cuda(const Tensor& self) {
return median_impl(self, /*ignore_nan=*/false);
}
std::tuple<Tensor&, Tensor&> nanmedian_out_cuda(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
return median_with_indices_impl(
values, indices, self, dim, keepdim, /*ignore_nan=*/true);
}
Tensor nanmedian_cuda(const Tensor& self) {
return median_impl(self, /*ignore_nan=*/true);
}
} // namespace native
} // namespace at
| Sorting.cu | #include <ATen/ATen.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/SortingUtils.h>
#include <c10/macros/Macros.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/MemoryOverlap.h>
#include <THC/THCDeviceUtils.cuh> // only for THCRoundUp?
#include <THC/THCNumerics.cuh>
#include <THC/THCScanUtils.cuh>
#include <THC/THCTensorMathReduce.cuh> // AddOp
#include <cassert>
#include <cstdlib>
namespace at {
namespace native {
namespace {
// Finds the rank k element, and its index, of the values along dimension dim
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th highest element
index_t kValueIndex = 0;
bool foundKValue = false;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange &&
((v == kValue) ||
(THCNumerics<scalar_t>::isnan(v) &&
THCNumerics<scalar_t>::isnan(kValue)));
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
// CUDA kernel to find the median, and its index, of the values along dimension dim
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherMedian(
cuda::detail::TensorInfo<scalar_t, index_t> values,
cuda::detail::TensorInfo<int64_t, index_t> indices,
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t numInputSlices,
index_t inputWithinSliceStride,
bool ignore_nan) {
// Shared memory for the subroutine RadixSelect. Note that RadixSelect converts the
// floating point type to int with the same relative ordering.
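// (For floats this is typically done by flipping the sign bit of non-negative values
// and inverting all bits of negative values, so that unsigned integer comparisons
// reproduce the floating-point ordering.)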
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Finds the start offset for our slice
index_t valuesSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, values);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
index_t inputSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
scalar_t* valuesSliceStart = &values.data[valuesSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
scalar_t* inputSliceStart = &input.data[inputSliceStartIndex];
index_t nan_count = 0;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]);
nan_count += THCNumerics<scalar_t>::isnan(val) ? 1 : 0;
}
// Counts number of nan values
// This code performs a parallel sum reduction (not the most efficient code)
__shared__ int64_t num_nan;
if (threadIdx.x == 0) {
num_nan = 0;
}
__syncthreads();
if (nan_count > 0) {
atomicAdd(&num_nan, nan_count);
}
__syncthreads();
// For torch.median, if we found nan set k to last index so the computed value
// is nan, otherwise set k to the middle element of the non-nan values
index_t k = (!ignore_nan && num_nan > 0) ? inputSliceSize - 1
: (inputSliceSize - num_nan - 1) / 2;
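// Example: for a slice [1.0, NAN, 3.0, 2.0] (inputSliceSize = 4, num_nan = 1),
// torch.median uses k = 3 so radixSelect below returns the NAN, while
// torch.nanmedian uses k = (4 - 1 - 1) / 2 = 1, i.e. the value 2.0.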
// Find the median
scalar_t median = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k + 1,
inputSliceSize,
inputWithinSliceStride,
smem,
&median);
valuesSliceStart[0] = median;
// Find the index of the median value in the slice
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
scalar_t val = doLdg(&inputSliceStart[i * inputWithinSliceStride]);
if (val == median ||
(THCNumerics<scalar_t>::isnan(val) &&
THCNumerics<scalar_t>::isnan(median))) {
indicesSliceStart[0] = i;
break;
}
}
}
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(std::min(
THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::cuda::getCurrentCUDAStream();
gatherKthValue<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>(
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
};
struct MedianLauncher {
bool ignore_nan;
MedianLauncher(bool ignore_nan) : ignore_nan(ignore_nan) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(std::min(
THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::cuda::getCurrentCUDAStream();
gatherMedian<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>(
values_info,
indices_info,
self_info,
slice_size,
num_slices,
self_info.strides[collapse_self_dim],
ignore_nan);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.dim() == 0 ? 1 : self.size(dim);
zero_numel_check_dims(self, dim, "kth_value()");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
at::assert_no_overlap(self, values);
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (self.numel() != 0) {
AT_DISPATCH_INDEX_TYPES(
cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices) ? ScalarType::Int : ScalarType::Long,
"kth_value_launcher", [&] {
run_launcher<scalar_t, index_t>(
values, indices, self, dim, KthValueLauncher(k));
});
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
}
std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(
values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> median_with_indices_impl(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t dim,
bool keepdim,
bool ignore_nan) {
// See note [Writing Nondeterministic Operations]
// If there are duplicate elements of a median value, the procedure for choosing which
// of the duplicates to use for the indices output is nondeterministic.
at::globalContext().alertNotDeterministic("median CUDA with indices output");
NoNamesGuard guard;
dim = at::maybe_wrap_dim(dim, self.dim());
Tensor in = self.dim() > 0 ? self.contiguous() : self.unsqueeze(0);
checkDeviceType("median", {values, indices}, self.device().type());
checkScalarType("median", {indices, "indices", 1}, kLong);
checkSameType("median", {values, "values", 0}, {self, "self", 2});
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"median() cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
std::vector<int64_t> out_shape = self.sizes().vec();
zero_numel_check_dims(self, dim, "median()");
if (self.dim() > 0) {
if (keepdim) {
out_shape[dim] = 1;
} else {
out_shape.erase(out_shape.begin() + dim);
}
}
values.resize_(out_shape);
indices.resize_(out_shape);
// Only launch kernel for non-empty tensors
if (self.numel() > 0) {
// Ensure #dim is the same for all tensors required for reduction
Tensor vals = keepdim && self.dim() > 0 ? values : values.unsqueeze(dim);
Tensor inds = keepdim && self.dim() > 0 ? indices : indices.unsqueeze(dim);
AT_DISPATCH_ALL_TYPES_AND(
at::ScalarType::Half, self.scalar_type(), "median_out_impl", [&] {
if (cuda::detail::canUse32BitIndexMath(vals) &&
cuda::detail::canUse32BitIndexMath(inds) &&
cuda::detail::canUse32BitIndexMath(in)) {
run_launcher<scalar_t, uint32_t>(
vals, inds, in, dim, MedianLauncher(ignore_nan));
} else {
run_launcher<scalar_t, uint64_t>(
vals, inds, in, dim, MedianLauncher(ignore_nan));
}
});
}
guard.reset();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return std::forward_as_tuple(values, indices);
}
Tensor median_impl(const Tensor& self, bool ignore_nan) {
NoNamesGuard guard;
int64_t size = self.numel();
TORCH_CHECK(size > 0, "median() input tensor cannot be empty");
// Sort input tensor to efficiently query for median element
Tensor sorted = std::get<0>(self.flatten().sort());
if (!ignore_nan) {
// For torch.median return either the middle element or nan (sorted as
// largest) if there are any
int64_t k = (size - 1) / 2;
return at::where(sorted[-1].isnan(), sorted[-1], sorted[k]);
} else {
// For torch.nanmedian return the middle element among the non-nan values
Tensor k = ((size - 1) - sorted.isnan().sum()) / 2;
return sorted[k.toType(kLong)];
}
}
} // namespace
// Mark: kthvalue
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
// See note [Writing Nondeterministic Operations]
// If there are duplicate elements of the kth value, the procedure for choosing which
// of the duplicates to use for the indices output is nondeterministic.
at::globalContext().alertNotDeterministic("kthvalue CUDA");
auto result = [&]() {
NoNamesGuard guard;
// `kthvalue_out_impl_cuda` expects a contiguous input `self`.
return kthvalue_out_impl_cuda(values, indices, self.contiguous(), k, dim, keepdim);
}();
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
return result;
}
// Mark: median
std::tuple<Tensor&, Tensor&> median_out_cuda(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
return median_with_indices_impl(
values, indices, self, dim, keepdim, /*ignore_nan=*/false);
}
Tensor median_cuda(const Tensor& self) {
return median_impl(self, /*ignore_nan=*/false);
}
std::tuple<Tensor&, Tensor&> nanmedian_out_cuda(
const Tensor& self,
int64_t dim,
bool keepdim,
Tensor& values,
Tensor& indices) {
return median_with_indices_impl(
values, indices, self, dim, keepdim, /*ignore_nan=*/true);
}
Tensor nanmedian_cuda(const Tensor& self) {
return median_impl(self, /*ignore_nan=*/true);
}
} // namespace native
} // namespace at
|
8b7e5f4219cf5f4035fbd491537625f00da69713.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
//hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
//}
std::chrono::high_resolution_clock::time_point start, stop;
const int loop = 10000;
hipError_t cudaStatus;
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < loop; i++)
{
cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
}
stop = std::chrono::high_resolution_clock::now();
printf("CUDA is\n");
printf("\t{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
printf("\ttime = {%lld}\n", ms.count());
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < loop; i++)
{
auto pA = &a[0];
auto pB = &b[0];
auto pC = &c[0];
for (size_t j = 0; j < arraySize; j++, pA++, pB++, pC++)
{
*pC = *pA + *pB;
}
}
stop = std::chrono::high_resolution_clock::now();
printf("No CUDA is\n");
printf("\t{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
ms = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
printf("\ttime = {%lld}\n", ms.count());
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
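// Note: for a 5-element array the CUDA path is expected to be much slower than the
// CPU loop, since every addWithCuda call pays for device allocation, two host-to-device
// copies, a kernel launch and a device-to-host copy.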
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 8b7e5f4219cf5f4035fbd491537625f00da69713.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
//cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
//}
std::chrono::high_resolution_clock::time_point start, stop;
const int loop = 10000;
cudaError_t cudaStatus;
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < loop; i++)
{
cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
}
stop = std::chrono::high_resolution_clock::now();
printf("CUDA is\n");
printf("\t{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
printf("\ttime = {%lld}\n", ms.count());
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < loop; i++)
{
auto pA = &a[0];
auto pB = &b[0];
auto pC = &c[0];
for (size_t j = 0; j < arraySize; j++, pA++, pB++, pC++)
{
*pC = *pA + *pB;
}
}
stop = std::chrono::high_resolution_clock::now();
printf("No CUDA is\n");
printf("\t{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
ms = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
printf("\ttime = {%lld}\n", ms.count());
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
9695ed47867df4b2b5fabc2d681800a8f44b057c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/opencv_modules.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
__global__ void distance_matching(float* i_img, uchar* d_zimg, float* remapX, float* remapY)
{
int idx = blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y;
int w = 256;
float distance = 100.0;
float search_range = 0.75f;
double disX;
double disY;
disX = (double)(remapX[idx + w*idy] -(float)idx);
disY = (double)(remapY[idx + w*idy] -(float)idy);
distance = pow(disX, 2) + pow(disY, 2);
if ( distance > 0 && distance < search_range) {
i_img[idx + idy*w] *= (1-1/distance);
i_img[idx + idy*w] += ((float)d_zimg[idx + idy*w])*(1/distance);
}
}
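/* Fuses the base image with the remapped zoomed image: each output pixel is a
weighted average of the original pixel (weight 1) and every zoomed-image pixel
whose remapped coordinates fall within the search range, each weighted by the
inverse of its squared distance to the output pixel. */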
__global__ void non_uniform_pixel_fusion(uchar* d_img,
uchar* d_zimg,
uchar* r_img,
int img_w,
int img_h,
int zmd_w,
int zmd_h,
float* remapX,
float* remapY,
float* minimum,
float* maximum)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx > img_w || idy > img_h) return;
uchar subpixels[32];
float weights[32];
for (int i = 0; i < 32; i++) {
subpixels[i] = (uchar)0;
weights[i] = (float)0;
}
double distance;
float average;
float total_weight;
float search_range = 0.75f;
// go through every pixel of the area found by matching the zoomed image
weights[0] = 1.0f;
subpixels[0] = d_img[idx + idy*img_w];
int px_count = 1;
distance = 100.0;
double disX;
double disY;
if( (float)idx >= minimum[0]-search_range && (float)idy >= minimum[1]+search_range &&
(float)idx <= maximum[0]-search_range && (float)idy <= maximum[1]+search_range) {
for(int k = 0; k < zmd_w; k++) {
for(int l = 0; l < zmd_h; l++) {
disX = (double)(remapX[k + zmd_w*l] -(float)idx);
disY = (double)(remapY[k + zmd_w*l] -(float)idy);
distance = pow(disX, 2) + pow(disY, 2);
if ( distance > 0 && distance < search_range) {
weights[px_count] = (float)(1/distance);
subpixels[px_count] = d_zimg[k + zmd_w*l];
px_count++;
}
}
}
}
// calculate the total weights of the pixels
total_weight = 0;
for(int j = 0; j < px_count; j++) {
total_weight += weights[j];
}
// compute the average of the subpixels and set it as the result pixel
if( px_count > 0 ) {
int i;
average = 0;
for(i = 0; i < px_count; i++) {
average += (float)(subpixels[i])*weights[i];
}
r_img[idx + img_w*idy] = (uchar)(average/total_weight);
//r_img[idx + img_w*idy] = 255;
}
}
extern void cuda_doStuff(Mat* img,
Mat& result,
std::vector<Mat>* images,
float* remappedX,
float* remappedY,
float* minimum,
float* maximum,
std::vector<Mat> H)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, 0);
printf("Global memory: %i\n", props.totalGlobalMem);
printf("Warp size: %i\n", props.warpSize);
printf("Threads per blk: %i\n", props.maxThreadsPerBlock);
printf("Max block dim: %i\n", props.maxThreadsDim[0]);
printf("Max grid dim: %i\n", props.maxGridSize[0]);
int i_h = img->size().height;
int i_w = img->size().width;
size_t size = i_w*i_h;
int z_h = images->at(0).size().height;
int z_w = images->at(0).size().width;
size_t zoomed_size = z_w*z_h;
printf("image size: %i\n", size);
printf("img_w: %i\timg_h: %i\tzmg_w: %i\tzmg_h: %i\n", i_w, i_h, z_w, z_h);
printf("min: %f, %f\tmax: %f, %f\n", minimum[0], minimum[1], maximum[0], maximum[1]);
uchar* d_img;
hipMalloc((void**)&d_img, sizeof(uchar)*size);
uchar* r_img;
hipMalloc((void**)&r_img, sizeof(uchar)*size);
uchar* d_zimg;
hipMalloc((void**)&d_zimg, sizeof(uchar)*size);
uchar* h_img;
if(img->isContinuous()) {
h_img = img->data;
}
uchar* h_zimg;
h_zimg = images->at(0).data;
float* remapX;
hipMalloc(&remapX, sizeof(float)*size);
float* remapY;
hipMalloc(&remapY, sizeof(float)*size);
float* mini;
hipMalloc(&mini, sizeof(float)*2);
float* maxi;
hipMalloc(&maxi, sizeof(float)*2);
//memory copies to the device and prints errors
hipMemcpy(d_img, h_img, sizeof(uchar)*size, hipMemcpyHostToDevice);
printf("h_img -> d_img:\t%s\n", hipGetErrorString(hipGetLastError()));
hipDeviceSynchronize();
hipMemcpy(d_zimg, h_zimg, sizeof(uchar)*zoomed_size, hipMemcpyHostToDevice);
printf("h_zimg -> d_zimg:\t%s\n", hipGetErrorString(hipGetLastError()));
hipMemcpy(remapX, remappedX, sizeof(float)*size, hipMemcpyHostToDevice);
printf("remappedX -> remapX:\t%s\n", hipGetErrorString(hipGetLastError()));
hipMemcpy(remapY, remappedY, sizeof(float)*size, hipMemcpyHostToDevice);
printf("remappedY -> remapY:\t%s\n", hipGetErrorString(hipGetLastError()));
hipMemcpy(mini, minimum, sizeof(float)*2, hipMemcpyHostToDevice);
printf("minimum -> mini:\t%s\n", hipGetErrorString(hipGetLastError()));
hipMemcpy(maxi, maximum, sizeof(float)*2, hipMemcpyHostToDevice);
printf("maximum -> maxi:\t%s\n", hipGetErrorString(hipGetLastError()));
dim3 threadsPerBlock(32, 32);
dim3 numberOfBlocks(ceil((float)i_w/(float)threadsPerBlock.x), ceil((float)i_h/(float)threadsPerBlock.y));
non_uniform_pixel_fusion << <numberOfBlocks, threadsPerBlock >> >(d_img, d_zimg, r_img,
i_w, i_h, z_w, z_h,
remapX, remapY,
mini, maxi);
hipError_t err = hipGetLastError();
hipDeviceSynchronize();
printf("all kernels done. Latest error: %s\n", hipGetErrorString( err ));
uchar* output;
output = (uchar*) malloc(size);
hipMemcpy(output, r_img, sizeof(uchar)*size, hipMemcpyDeviceToHost);
Mat res = Mat(i_h, i_w, CV_8UC1, output, Mat::AUTO_STEP);
result = res.clone();
//free resources
hipFree(d_img);
hipFree(d_zimg);
hipFree(r_img);
hipFree(remapX);
hipFree(remapY);
} | 9695ed47867df4b2b5fabc2d681800a8f44b057c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/opencv_modules.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
__global__ void distance_matching(float* i_img, uchar* d_zimg, float* remapX, float* remapY)
{
int idx = blockIdx.x * blockDim.x;
int idy = blockIdx.y * blockDim.y;
int w = 256;
float distance = 100.0;
float search_range = 0.75f;
double disX;
double disY;
disX = (double)(remapX[idx + w*idy] -(float)idx);
disY = (double)(remapY[idx + w*idy] -(float)idy);
distance = pow(disX, 2) + pow(disY, 2);
if ( distance > 0 && distance < search_range) {
i_img[idx + idy*w] *= (1-1/distance);
i_img[idx + idy*w] += ((float)d_zimg[idx + idy*w])*(1/distance);
}
}
__global__ void non_uniform_pixel_fusion(uchar* d_img,
uchar* d_zimg,
uchar* r_img,
int img_w,
int img_h,
int zmd_w,
int zmd_h,
float* remapX,
float* remapY,
float* minimum,
float* maximum)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if(idx > img_w || idy > img_h) return;
uchar subpixels[32];
float weights[32];
for (int i = 0; i < 32; i++) {
subpixels[i] = (uchar)0;
weights[i] = (float)0;
}
double distance;
float average;
float total_weight;
float search_range = 0.75f;
// go through every pixel of the area found by matching the zoomed image
weights[0] = 1.0f;
subpixels[0] = d_img[idx + idy*img_w];
int px_count = 1;
distance = 100.0;
double disX;
double disY;
if( (float)idx >= minimum[0]-search_range && (float)idy >= minimum[1]+search_range &&
(float)idx <= maximum[0]-search_range && (float)idy <= maximum[1]+search_range) {
for(int k = 0; k < zmd_w; k++) {
for(int l = 0; l < zmd_h; l++) {
disX = (double)(remapX[k + zmd_w*l] -(float)idx);
disY = (double)(remapY[k + zmd_w*l] -(float)idy);
distance = pow(disX, 2) + pow(disY, 2);
if ( distance > 0 && distance < search_range) {
weights[px_count] = (float)(1/distance);
subpixels[px_count] = d_zimg[k + zmd_w*l];
px_count++;
}
}
}
}
// calculate the total weights of the pixels
total_weight = 0;
for(int j = 0; j < px_count; j++) {
total_weight += weights[j];
}
// compute the average of the subpixels and set it as the result pixel
if( px_count > 0 ) {
int i;
average = 0;
for(i = 0; i < px_count; i++) {
average += (float)(subpixels[i])*weights[i];
}
r_img[idx + img_w*idy] = (uchar)(average/total_weight);
//r_img[idx + img_w*idy] = 255;
}
}
extern void cuda_doStuff(Mat* img,
Mat& result,
std::vector<Mat>* images,
float* remappedX,
float* remappedY,
float* minimum,
float* maximum,
std::vector<Mat> H)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
printf("Global memory: %i\n", props.totalGlobalMem);
printf("Warp size: %i\n", props.warpSize);
printf("Threads per blk: %i\n", props.maxThreadsPerBlock);
printf("Max block dim: %i\n", props.maxThreadsDim[0]);
printf("Max grid dim: %i\n", props.maxGridSize[0]);
int i_h = img->size().height;
int i_w = img->size().width;
size_t size = i_w*i_h;
int z_h = images->at(0).size().height;
int z_w = images->at(0).size().width;
size_t zoomed_size = z_w*z_h;
printf("image size: %i\n", size);
printf("img_w: %i\timg_h: %i\tzmg_w: %i\tzmg_h: %i\n", i_w, i_h, z_w, z_h);
printf("min: %f, %f\tmax: %f, %f\n", minimum[0], minimum[1], maximum[0], maximum[1]);
uchar* d_img;
cudaMalloc((void**)&d_img, sizeof(uchar)*size);
uchar* r_img;
cudaMalloc((void**)&r_img, sizeof(uchar)*size);
uchar* d_zimg;
cudaMalloc((void**)&d_zimg, sizeof(uchar)*size);
uchar* h_img;
if(img->isContinuous()) {
h_img = img->data;
}
uchar* h_zimg;
h_zimg = images->at(0).data;
float* remapX;
cudaMalloc(&remapX, sizeof(float)*size);
float* remapY;
cudaMalloc(&remapY, sizeof(float)*size);
float* mini;
cudaMalloc(&mini, sizeof(float)*2);
float* maxi;
cudaMalloc(&maxi, sizeof(float)*2);
//memory copies to the device and prints errors
cudaMemcpy(d_img, h_img, sizeof(uchar)*size, cudaMemcpyHostToDevice);
printf("h_img -> d_img:\t%s\n", cudaGetErrorString(cudaGetLastError()));
cudaThreadSynchronize();
cudaMemcpy(d_zimg, h_zimg, sizeof(uchar)*zoomed_size, cudaMemcpyHostToDevice);
printf("h_zimg -> d_zimg:\t%s\n", cudaGetErrorString(cudaGetLastError()));
cudaMemcpy(remapX, remappedX, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("remappedX -> remapX:\t%s\n", cudaGetErrorString(cudaGetLastError()));
cudaMemcpy(remapY, remappedY, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("remappedY -> remapY:\t%s\n", cudaGetErrorString(cudaGetLastError()));
cudaMemcpy(mini, minimum, sizeof(float)*2, cudaMemcpyHostToDevice);
printf("minimum -> mini:\t%s\n", cudaGetErrorString(cudaGetLastError()));
cudaMemcpy(maxi, maximum, sizeof(float)*2, cudaMemcpyHostToDevice);
printf("maximum -> maxi:\t%s\n", cudaGetErrorString(cudaGetLastError()));
dim3 threadsPerBlock(32, 32);
dim3 numberOfBlocks(ceil((float)i_w/(float)threadsPerBlock.x), ceil((float)i_h/(float)threadsPerBlock.y));
non_uniform_pixel_fusion << <numberOfBlocks, threadsPerBlock >> >(d_img, d_zimg, r_img,
i_w, i_h, z_w, z_h,
remapX, remapY,
mini, maxi);
cudaError err = cudaGetLastError();
cudaDeviceSynchronize();
printf("all kernels done. Latest error: %s\n", cudaGetErrorString( err ));
uchar* output;
output = (uchar*) malloc(size);
cudaMemcpy(output, r_img, sizeof(uchar)*size, cudaMemcpyDeviceToHost);
Mat res = Mat(i_h, i_w, CV_8UC1, output, Mat::AUTO_STEP);
result = res.clone();
//free resources
cudaFree(d_img);
cudaFree(d_zimg);
cudaFree(r_img);
cudaFree(remapX);
cudaFree(remapY);
} |
bcd63e7e581fd4c42b39f659584a60c0852fbd01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "canny.cuh"
#include "pixel.cuh"
#include <stdio.h>
#include <cstdlib>
#define BLOCK_SIZE 32
using namespace std;
/**
=========================================== Kernel Convolution =========================================================
This function performs the convolution step on the given image array using the mask array that has been passed.
The output of this step is stored in the output array.
========================================================================================================================
**/
__global__ void convolution_kernel(const uint8_t* image, float* output, const float* mask,
int imageRows, int imageCols, int outputRows, int outputCols,
int maskDimension) {
int tx = threadIdx.x;
int ty = threadIdx.y;
const int TILE_SIZE = (BLOCK_SIZE - maskDimension + 1);
int col = blockIdx.x * TILE_SIZE + tx;
int row = blockIdx.y * TILE_SIZE + ty;
int row_i = row - maskDimension / 2;
int col_i = col - maskDimension / 2;
float tmp = 0;
__shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE];
if (row_i < imageRows && row_i >= 0 && col_i < imageCols && col_i >= 0) {
sharedMem[ty][tx] = (float)image[row_i * imageCols + col_i];
}
else {
sharedMem[ty][tx] = 0;
}
__syncthreads();
if (ty < TILE_SIZE && tx < TILE_SIZE) {
for (int i = 0; i < maskDimension; i++) {
for (int j = 0; j < maskDimension; j++) {
tmp += mask[i * maskDimension + j] * sharedMem[ty + i][tx + j];
}
}
__syncthreads();
if (row < outputRows && col < outputCols) {
output[row * outputCols + col] = tmp;
}
}
}
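/*
Host-side launch sketch (d_image, d_output, d_mask, maskDim and the size variables are
placeholder names, not defined in this file): each block produces a TILE_SIZE x TILE_SIZE
output tile, so the grid is sized against the tile rather than the full block.

const int TILE_SIZE = BLOCK_SIZE - maskDim + 1;
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((outputCols + TILE_SIZE - 1) / TILE_SIZE,
(outputRows + TILE_SIZE - 1) / TILE_SIZE);
hipLaunchKernelGGL(convolution_kernel, grid, block, 0, 0,
d_image, d_output, d_mask,
imageRows, imageCols, outputRows, outputCols, maskDim);
*/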
__global__ void magnitude_matrix_kernel(float* mag, const float* x, const float* y, const int height, const int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int array_upper_bound = width * height;
if (index < array_upper_bound) {
float mags = sqrt(x[index] * x[index] + y[index] * y[index]);
mag[index] = mags;
}
} | bcd63e7e581fd4c42b39f659584a60c0852fbd01.cu | #include "canny.cuh"
#include "pixel.cuh"
#include <stdio.h>
#include <cstdlib>
#define BLOCK_SIZE 32
using namespace std;
/**
=========================================== Kernel Convolution =========================================================
This function performs the convolution step on the given image array using the mask array that has been passed.
The output of this step is stored in the output array.
========================================================================================================================
**/
__global__ void convolution_kernel(const uint8_t* image, float* output, const float* mask,
int imageRows, int imageCols, int outputRows, int outputCols,
int maskDimension) {
int tx = threadIdx.x;
int ty = threadIdx.y;
const int TILE_SIZE = (BLOCK_SIZE - maskDimension + 1);
int col = blockIdx.x * TILE_SIZE + tx;
int row = blockIdx.y * TILE_SIZE + ty;
int row_i = row - maskDimension / 2;
int col_i = col - maskDimension / 2;
float tmp = 0;
__shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE];
if (row_i < imageRows && row_i >= 0 && col_i < imageCols && col_i >= 0) {
sharedMem[ty][tx] = (float)image[row_i * imageCols + col_i];
}
else {
sharedMem[ty][tx] = 0;
}
__syncthreads();
if (ty < TILE_SIZE && tx < TILE_SIZE) {
for (int i = 0; i < maskDimension; i++) {
for (int j = 0; j < maskDimension; j++) {
tmp += mask[i * maskDimension + j] * sharedMem[ty + i][tx + j];
}
}
__syncthreads();
if (row < outputRows && col < outputCols) {
output[row * outputCols + col] = tmp;
}
}
}
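/*
Host-side launch sketch (d_image, d_output, d_mask, maskDim and the size variables are
placeholder names, not defined in this file): each block produces a TILE_SIZE x TILE_SIZE
output tile, so the grid is sized against the tile rather than the full block.

const int TILE_SIZE = BLOCK_SIZE - maskDim + 1;
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((outputCols + TILE_SIZE - 1) / TILE_SIZE,
(outputRows + TILE_SIZE - 1) / TILE_SIZE);
convolution_kernel<<<grid, block>>>(d_image, d_output, d_mask,
imageRows, imageCols, outputRows, outputCols, maskDim);
*/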
__global__ void magnitude_matrix_kernel(float* mag, const float* x, const float* y, const int height, const int width) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int array_upper_bound = width * height;
if (index < array_upper_bound) {
float mags = sqrt(x[index] * x[index] + y[index] * y[index]);
mag[index] = mags;
}
} |
1fe8038a08cf9d9c2bfb7a423fcf3de71a6899db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
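/* H(a) is the entropy term -a*log2(a); H2(a1, a2, p) is the entropy of a two-class
count (a1, a2) smoothed with the a priori probability p, i.e.
H((a1 + p) / (a1 + a2 + 1)) + H((a2 + 1 - p) / (a1 + a2 + 1)). */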
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
* - vector of values of the first descriptive variable *v1s, 1 variable, all objects
* - vector of values of the second descriptive variable *v2s, 1 variable, all objects
* - vector of decision variable values *ds
* - number of objects num_objects
*/
__device__ float compute_gig_1_2(char *v1s, char *v2s, char *ds, int num_objects, float p)
{
short count[2][3][3] = { 0 };
for (int i = 0; i < num_objects; ++i) {
char d = (ds[i / 8] >> (i % 8)) & 1;
char v1 = (v1s[i / 4] >> ((i % 4) * 2)) & 3;
char v2 = (v2s[i / 4] >> ((i % 4) * 2)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12, h_p;
h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
//printf(" IG(v1) = %f\n", ig1);
//printf(" IG(v2) = %f\n", ig2);
//printf(" IG(v1 u v2) = %f\n", ig12);
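// GIG: the information gain of the pair minus the better of the two single-variable gains.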
return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
* - matrix of descriptive variable values *vars, 1 row = 1 variable
* - vector of decision variable values *ds
* - number of objects num_objects
* - number of variables num_vars
* - resulting GIG values
*/
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
const int num_o_padded = (num_objects - 1) / 4 + 1;
r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
struct GigStruct {
float gig;
int v1, v2;
};
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
float p, float threshold)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
const int num_o_padded = (num_objects - 1) / 4 + 1;
float gig = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
if (gig < threshold) return;
/* atomicInc() wraps around to 0 */
int num = atomicAdd(num_gig_structs, 1);
if (num < max_num_gig_structs) {
r_gig[num].gig = gig;
r_gig[num].v1 = v1_p;
r_gig[num].v2 = v2_p;
}
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
int compare_float(const void *a, const void *b)
{
if (*((float*)a) > *((float*)b)) return -1;
else if (*((float*)a) == *((float*)b)) return 0;
else return 1;
}
int main()
{
int num_objects, num_vars, result_size, real_result_size;
float a_priori, threshold;
float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
Timer timer;
timer.start();
scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
Sync2BitArray2D vars(num_vars, num_objects);
SyncBitArray ds(num_objects);
/* Read the data */
{
for (int i = 0; i < num_objects; ++i) {
int a; scanf("%d", &a); a &= 1;
ds.setHost(i, a);
for (int j = 0; j < num_vars; ++j) {
int b; scanf("%d", &b); b &= 3;
vars.setHost(j, i, b);
}
}
input = timer.lap();
}
/* Copy the data to the GPU */
{
vars.syncToDevice();
ds.syncToDevice();
copy = timer.lap();
}
/* Run a randomized trial on the first 10% of the variables */
{
int random_trial_size = num_vars / 10;
/* Allocating memory for the resulting GIG values fails when the memory is > ~400MB.
XXX: The gig array would not have to be square. */
if (random_trial_size > 8192)
random_trial_size = 8192;
float percent = (float)random_trial_size / (float)num_vars ;
SyncArray2D<float> gig(random_trial_size, random_trial_size);
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
random_trial_kernel = timer.lap();
gig.syncToHost();
random_trial_copy = timer.lap();
/* Copy the computed GIG values into a contiguous chunk of memory,
sort them and pick the appropriate element as the threshold */
{
int num_gig = 0;
float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
qsort(gig_sorted, num_gig, sizeof(float), compare_float);
/* gig_sorted is sorted in descending order */
threshold = gig_sorted[(int)((float)result_size * percent * percent)];
free(gig_sorted);
}
random_trial_process = timer.lap();
}
/* Run the final computation on all variables with a kernel
that only stores values greater than the threshold */
{
const int max_num_structs = result_size * 2;
SyncArray<struct GigStruct> gig_structs(max_num_structs);
SyncVar<int> num_structs;
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
padToMultipleOf(num_vars, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_wt_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
main_kernel = timer.lap();
num_structs.syncToHost();
gig_structs.syncToHost();
main_copy = timer.lap();
real_result_size = *num_structs.getHost();
qsort(gig_structs.getHost(), *num_structs.getHost(), sizeof(struct GigStruct), compare_gig);
for (int i = *num_structs.getHost() - 1; i >= 0; --i)
printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
main_process = timer.lap();
}
all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
return 0;
}
| 1fe8038a08cf9d9c2bfb7a423fcf3de71a6899db.cu | #include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
* - vector of values of the first descriptive variable *v1s, 1 variable, all objects
* - vector of values of the second descriptive variable *v2s, 1 variable, all objects
* - vector of decision variable values *ds
* - number of objects num_objects
*/
__device__ float compute_gig_1_2(char *v1s, char *v2s, char *ds, int num_objects, float p)
{
short count[2][3][3] = { 0 };
for (int i = 0; i < num_objects; ++i) {
char d = (ds[i / 8] >> (i % 8)) & 1;
char v1 = (v1s[i / 4] >> ((i % 4) * 2)) & 3;
char v2 = (v2s[i / 4] >> ((i % 4) * 2)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12, h_p;
h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
//printf(" IG(v1) = %f\n", ig1);
//printf(" IG(v2) = %f\n", ig2);
//printf(" IG(v1 u v2) = %f\n", ig12);
return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
* - matrix of descriptive variable values *vars, 1 row = 1 variable
* - vector of decision variable values *ds
* - number of objects num_objects
* - number of variables num_vars
* - resulting GIG values
*/
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
const int num_o_padded = (num_objects - 1) / 4 + 1;
r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
struct GigStruct {
float gig;
int v1, v2;
};
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
float p, float threshold)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
const int num_o_padded = (num_objects - 1) / 4 + 1;
float gig = compute_gig_1_2(&vars[v1_p * num_o_padded], &vars[v2_p * num_o_padded], ds, num_objects, p);
if (gig < threshold) return;
/* atomicInc() wraps around to 0 */
int num = atomicAdd(num_gig_structs, 1);
if (num < max_num_gig_structs) {
r_gig[num].gig = gig;
r_gig[num].v1 = v1_p;
r_gig[num].v2 = v2_p;
}
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
int compare_float(const void *a, const void *b)
{
if (*((float*)a) > *((float*)b)) return -1;
else if (*((float*)a) == *((float*)b)) return 0;
else return 1;
}
int main()
{
int num_objects, num_vars, result_size, real_result_size;
float a_priori, threshold;
float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
Timer timer;
timer.start();
scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
Sync2BitArray2D vars(num_vars, num_objects);
SyncBitArray ds(num_objects);
/* Read the data */
{
for (int i = 0; i < num_objects; ++i) {
int a; scanf("%d", &a); a &= 1;
ds.setHost(i, a);
for (int j = 0; j < num_vars; ++j) {
int b; scanf("%d", &b); b &= 3;
vars.setHost(j, i, b);
}
}
input = timer.lap();
}
/* Copy the data to the GPU */
{
vars.syncToDevice();
ds.syncToDevice();
copy = timer.lap();
}
/* Run a randomized trial on the first 10% of the variables */
{
int random_trial_size = num_vars / 10;
/* Allocating memory for the resulting GIG values fails when the memory is > ~400MB.
XXX: The gig array would not have to be square. */
if (random_trial_size > 8192)
random_trial_size = 8192;
float percent = (float)random_trial_size / (float)num_vars ;
SyncArray2D<float> gig(random_trial_size, random_trial_size);
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
compute_gig_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
CUDA_CALL(cudaGetLastError());
cudaDeviceSynchronize();
random_trial_kernel = timer.lap();
gig.syncToHost();
random_trial_copy = timer.lap();
/* Copy the computed GIG values into a contiguous chunk of memory,
sort them and pick the appropriate element as the threshold */
{
int num_gig = 0;
float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
qsort(gig_sorted, num_gig, sizeof(float), compare_float);
/* gig_sorted is sorted in descending order */
threshold = gig_sorted[(int)((float)result_size * percent * percent)];
free(gig_sorted);
}
random_trial_process = timer.lap();
}
/* Run the final computation on all variables with a kernel
that only stores values greater than the threshold */
{
const int max_num_structs = result_size * 2;
SyncArray<struct GigStruct> gig_structs(max_num_structs);
SyncVar<int> num_structs;
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
padToMultipleOf(num_vars, block_size.y) / block_size.y);
compute_gig_wt_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(cudaGetLastError());
cudaDeviceSynchronize();
main_kernel = timer.lap();
num_structs.syncToHost();
gig_structs.syncToHost();
main_copy = timer.lap();
real_result_size = *num_structs.getHost();
qsort(gig_structs.getHost(), *num_structs.getHost(), sizeof(struct GigStruct), compare_gig);
for (int i = *num_structs.getHost() - 1; i >= 0; --i)
printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
main_process = timer.lap();
}
all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
return 0;
}
|
21f63710a91d88ce7b7684539960c7d258ddeea2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace vibe
{
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor);
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, hipStream_t stream);
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, hipStream_t stream);
}
}}}
namespace cv { namespace gpu { namespace cudev
{
namespace vibe
{
__constant__ int c_nbSamples;
__constant__ int c_reqMatches;
__constant__ int c_radius;
__constant__ int c_subsamplingFactor;
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor)
{
cudaSafeCall( hipMemcpyToSymbol(c_nbSamples, &nbSamples, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_reqMatches, &reqMatches, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_radius, &radius, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_subsamplingFactor, &subsamplingFactor, sizeof(int)) );
}
__device__ __forceinline__ uint nextRand(uint& state)
{
const unsigned int CV_RNG_COEFF = 4164903690U;
state = state * CV_RNG_COEFF + (state >> 16);
return state;
}
__constant__ int c_xoff[9] = {-1, 0, 1, -1, 1, -1, 0, 1, 0};
__constant__ int c_yoff[9] = {-1, -1, -1, 0, 0, 1, 1, 1, 0};
__device__ __forceinline__ int2 chooseRandomNeighbor(int x, int y, uint& randState, int count = 8)
{
int idx = nextRand(randState) % count;
return make_int2(x + c_xoff[idx], y + c_yoff[idx]);
}
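        // the last offset pair is (0, 0): init() passes count = 9 so a pixel may seed its model
        // with its own value, while update() keeps the default count of 8 strict neighbours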
__device__ __forceinline__ uchar cvt(uchar val)
{
return val;
}
__device__ __forceinline__ uchar4 cvt(const uchar3& val)
{
return make_uchar4(val.x, val.y, val.z, 0);
}
__device__ __forceinline__ uchar4 cvt(const uchar4& val)
{
return val;
}
template <typename SrcT, typename SampleT>
__global__ void init(const PtrStepSz<SrcT> frame, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
for (int k = 0; k < c_nbSamples; ++k)
{
int2 np = chooseRandomNeighbor(x, y, localState, 9);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
SrcT pix = frame(np.y, np.x);
samples(k * frame.rows + y, x) = cvt(pix);
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void init_caller(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(init<SrcT, SampleT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( init<SrcT, SampleT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream);
static const func_t funcs[] =
{
0, init_caller<uchar, uchar>, 0, init_caller<uchar3, uchar4>, init_caller<uchar4, uchar4>
};
funcs[cn](frame, samples, randStates, stream);
}
__device__ __forceinline__ int calcDist(uchar a, uchar b)
{
return ::abs(a - b);
}
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
__device__ __forceinline__ int calcDist(const uchar4& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
template <typename SrcT, typename SampleT>
__global__ void update(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
SrcT imgPix = frame(y, x);
// comparison with the model
int count = 0;
for (int k = 0; (count < c_reqMatches) && (k < c_nbSamples); ++k)
{
SampleT samplePix = samples(k * frame.rows + y, x);
int distance = calcDist(imgPix, samplePix);
if (distance < c_radius)
++count;
}
// pixel classification according to reqMatches
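            // (count < c_reqMatches) evaluates to 1 for a foreground pixel; negating it gives -1,
            // which the cast to uchar turns into 255, while background pixels get 0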
fgmask(y, x) = (uchar) (-(count < c_reqMatches));
if (count >= c_reqMatches)
{
// the pixel belongs to the background
// gets a random number between 0 and subsamplingFactor-1
int randomNumber = nextRand(localState) % c_subsamplingFactor;
// update of the current pixel model
if (randomNumber == 0)
{
// random subsampling
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + y, x) = cvt(imgPix);
}
// update of a neighboring pixel model
randomNumber = nextRand(localState) % c_subsamplingFactor;
if (randomNumber == 0)
{
// random subsampling
// chooses a neighboring pixel randomly
int2 np = chooseRandomNeighbor(x, y, localState);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
// chooses the value to be replaced randomly
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + np.y, np.x) = cvt(imgPix);
}
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void update_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(update<SrcT, SampleT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( update<SrcT, SampleT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, fgmask, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream);
static const func_t funcs[] =
{
0, update_caller<uchar, uchar>, 0, update_caller<uchar3, uchar4>, update_caller<uchar4, uchar4>
};
funcs[cn](frame, fgmask, samples, randStates, stream);
}
}
}}}
#endif
| 21f63710a91d88ce7b7684539960c7d258ddeea2.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace vibe
{
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor);
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
}
}}}
namespace cv { namespace gpu { namespace cudev
{
namespace vibe
{
__constant__ int c_nbSamples;
__constant__ int c_reqMatches;
__constant__ int c_radius;
__constant__ int c_subsamplingFactor;
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor)
{
cudaSafeCall( cudaMemcpyToSymbol(c_nbSamples, &nbSamples, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_reqMatches, &reqMatches, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_radius, &radius, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_subsamplingFactor, &subsamplingFactor, sizeof(int)) );
}
__device__ __forceinline__ uint nextRand(uint& state)
{
const unsigned int CV_RNG_COEFF = 4164903690U;
state = state * CV_RNG_COEFF + (state >> 16);
return state;
}
__constant__ int c_xoff[9] = {-1, 0, 1, -1, 1, -1, 0, 1, 0};
__constant__ int c_yoff[9] = {-1, -1, -1, 0, 0, 1, 1, 1, 0};
__device__ __forceinline__ int2 chooseRandomNeighbor(int x, int y, uint& randState, int count = 8)
{
int idx = nextRand(randState) % count;
return make_int2(x + c_xoff[idx], y + c_yoff[idx]);
}
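        // the last offset pair is (0, 0): init() passes count = 9 so a pixel may seed its model
        // with its own value, while update() keeps the default count of 8 strict neighbours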
__device__ __forceinline__ uchar cvt(uchar val)
{
return val;
}
__device__ __forceinline__ uchar4 cvt(const uchar3& val)
{
return make_uchar4(val.x, val.y, val.z, 0);
}
__device__ __forceinline__ uchar4 cvt(const uchar4& val)
{
return val;
}
template <typename SrcT, typename SampleT>
__global__ void init(const PtrStepSz<SrcT> frame, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
for (int k = 0; k < c_nbSamples; ++k)
{
int2 np = chooseRandomNeighbor(x, y, localState, 9);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
SrcT pix = frame(np.y, np.x);
samples(k * frame.rows + y, x) = cvt(pix);
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void init_caller(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(init<SrcT, SampleT>, cudaFuncCachePreferL1) );
init<SrcT, SampleT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream);
static const func_t funcs[] =
{
0, init_caller<uchar, uchar>, 0, init_caller<uchar3, uchar4>, init_caller<uchar4, uchar4>
};
funcs[cn](frame, samples, randStates, stream);
}
__device__ __forceinline__ int calcDist(uchar a, uchar b)
{
return ::abs(a - b);
}
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
__device__ __forceinline__ int calcDist(const uchar4& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
template <typename SrcT, typename SampleT>
__global__ void update(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
SrcT imgPix = frame(y, x);
// comparison with the model
int count = 0;
for (int k = 0; (count < c_reqMatches) && (k < c_nbSamples); ++k)
{
SampleT samplePix = samples(k * frame.rows + y, x);
int distance = calcDist(imgPix, samplePix);
if (distance < c_radius)
++count;
}
// pixel classification according to reqMatches
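            // (count < c_reqMatches) evaluates to 1 for a foreground pixel; negating it gives -1,
            // which the cast to uchar turns into 255, while background pixels get 0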
fgmask(y, x) = (uchar) (-(count < c_reqMatches));
if (count >= c_reqMatches)
{
// the pixel belongs to the background
// gets a random number between 0 and subsamplingFactor-1
int randomNumber = nextRand(localState) % c_subsamplingFactor;
// update of the current pixel model
if (randomNumber == 0)
{
// random subsampling
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + y, x) = cvt(imgPix);
}
// update of a neighboring pixel model
randomNumber = nextRand(localState) % c_subsamplingFactor;
if (randomNumber == 0)
{
// random subsampling
// chooses a neighboring pixel randomly
int2 np = chooseRandomNeighbor(x, y, localState);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
// chooses the value to be replaced randomly
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + np.y, np.x) = cvt(imgPix);
}
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void update_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT, SampleT>, cudaFuncCachePreferL1) );
update<SrcT, SampleT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream);
static const func_t funcs[] =
{
0, update_caller<uchar, uchar>, 0, update_caller<uchar3, uchar4>, update_caller<uchar4, uchar4>
};
funcs[cn](frame, fgmask, samples, randStates, stream);
}
}
}}}
#endif
|
69aa32632d9ac9e3128c135ab14d90e595d744b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuda_debug_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
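// round the matrix dimensions up to the next multiple of the block size so the launch grid covers the whole matrix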
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cuda_debug_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cuda_debug_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cuda_debug_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 69aa32632d9ac9e3128c135ab14d90e595d744b6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuda_debug_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
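// round the matrix dimensions up to the next multiple of the block size so the launch grid covers the whole matrix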
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuda_debug_kernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuda_debug_kernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuda_debug_kernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bf725d3e93f15df36e15691802de4c59462a6148.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d1r-256-10-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
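      /* Temporal blocking: each launch of kernel0_N advances the stencil by N fused time steps
         (N = __side0Len), at the cost of a halo that grows with N. The loop below uses the
         deepest tile (10 steps); any time steps left over after the main loop are covered by the
         narrower kernels further down, split across one or two launches, and the parity check on
         __c0Padr appears to keep the final result in the correct half of the double-buffered A. */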
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| bf725d3e93f15df36e15691802de4c59462a6148.cu | #include <assert.h>
#include <stdio.h>
#include "box2d1r-256-10-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
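      /* Temporal blocking: each launch of kernel0_N advances the stencil by N fused time steps
         (N = __side0Len), at the cost of a halo that grows with N. The loop below uses the
         deepest tile (10 steps); any time steps left over after the main loop are covered by the
         narrower kernels further down, split across one or two launches, and the parity check on
         __c0Padr appears to keep the final result in the correct half of the double-buffered A. */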
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
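/* Host reference path (a descriptive note, inferred from the loop below): a 3x3
 weighted stencil applied over the interior of A for each timestep, double-buffered
 between A[t%2] and A[(t+1)%2]. */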
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
5c2198b735265bf5808ec7d6d80c55a40dfd7e07.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf {
namespace detail {
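// Element-wise cast functor: SFINAE selects the overload for numeric<->numeric,
// timestamp->timestamp (chrono floor), numeric->timestamp and timestamp->numeric.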
template <typename _T, typename _R>
struct unary_cast {
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<(cudf::is_numeric<T>() && cudf::is_numeric<R>())>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(element);
}
template <
typename T = _T,
typename R = _R,
typename std::enable_if_t<(cudf::is_timestamp<T>() && cudf::is_timestamp<R>())>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
    return static_cast<R>(simt::std::chrono::floor<typename R::duration>(element));
}
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<cudf::is_numeric<T>() && cudf::is_timestamp<R>()>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(static_cast<typename R::rep>(element));
}
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<cudf::is_timestamp<T>() && cudf::is_numeric<R>()>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(element.time_since_epoch().count());
}
};
template <typename T>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <typename R,
typename std::enable_if_t<cudf::is_numeric<R>() || cudf::is_timestamp<R>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto size = input.size();
auto output = std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), 0, mr},
copy_bitmask(input, 0, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream)->on(stream),
input.begin<T>(),
input.end<T>(),
output_mutable.begin<R>(),
unary_cast<T, R>{});
return output;
}
template <
typename R,
typename std::enable_if_t<!cudf::is_numeric<R>() && !cudf::is_timestamp<R>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("Column type must be numeric or timestamp");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T,
typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_timestamp<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, mr, stream);
}
template <
typename T,
typename std::enable_if_t<!cudf::is_timestamp<T>() && !cudf::is_numeric<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FAIL("Column type must be numeric or timestamp");
}
};
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, mr, stream);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, mr);
}
} // namespace cudf
| 5c2198b735265bf5808ec7d6d80c55a40dfd7e07.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/unary.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
namespace cudf {
namespace detail {
template <typename _T, typename _R>
struct unary_cast {
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<(cudf::is_numeric<T>() && cudf::is_numeric<R>())>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(element);
}
template <
typename T = _T,
typename R = _R,
typename std::enable_if_t<(cudf::is_timestamp<T>() && cudf::is_timestamp<R>())>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
    return static_cast<R>(simt::std::chrono::floor<typename R::duration>(element));
}
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<cudf::is_numeric<T>() && cudf::is_timestamp<R>()>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(static_cast<typename R::rep>(element));
}
template <typename T = _T,
typename R = _R,
typename std::enable_if_t<cudf::is_timestamp<T>() && cudf::is_numeric<R>()>* = nullptr>
CUDA_DEVICE_CALLABLE R operator()(T const element)
{
return static_cast<R>(element.time_since_epoch().count());
}
};
template <typename T>
struct dispatch_unary_cast_to {
column_view input;
dispatch_unary_cast_to(column_view inp) : input(inp) {}
template <typename R,
typename std::enable_if_t<cudf::is_numeric<R>() || cudf::is_timestamp<R>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto size = input.size();
auto output = std::make_unique<column>(type,
size,
rmm::device_buffer{size * cudf::size_of(type), 0, mr},
copy_bitmask(input, 0, mr),
input.null_count());
mutable_column_view output_mutable = *output;
thrust::transform(rmm::exec_policy(stream)->on(stream),
input.begin<T>(),
input.end<T>(),
output_mutable.begin<R>(),
unary_cast<T, R>{});
return output;
}
template <
typename R,
typename std::enable_if_t<!cudf::is_numeric<R>() && !cudf::is_timestamp<R>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("Column type must be numeric or timestamp");
}
};
struct dispatch_unary_cast_from {
column_view input;
dispatch_unary_cast_from(column_view inp) : input(inp) {}
template <typename T,
typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_timestamp<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, mr, stream);
}
template <
typename T,
typename std::enable_if_t<!cudf::is_timestamp<T>() && !cudf::is_numeric<T>()>* = nullptr>
std::unique_ptr<column> operator()(data_type type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FAIL("Column type must be numeric or timestamp");
}
};
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width.");
return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, mr, stream);
}
} // namespace detail
std::unique_ptr<column> cast(column_view const& input,
data_type type,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::cast(input, type, mr);
}
} // namespace cudf
|
4d2ff4269450198982b157454831fcafa608c8af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/sort.h>
#include <thrust/random.h>
#include <thrust/device_ptr.h>
#include <thrust/system/hip/execution_policy.h>
#include <thread>
#include <thrust/scan.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <cstdio>
#include <cstring>
#include "fpgrowth.h"
#include <assert.h>
#include <sys/time.h>
#include <hipcub/hipcub.hpp>
__device__ __host__ unsigned int round_up_pow2(unsigned int v){
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
#define ENABLE_ASSERT 0
#define HASH_LOAD_FACTOR (1.0f)
#define NULL_NODE (NULL)
#define STOP_NODE (0xFFFFFFFF)
#define SPLIT_NODE (-2)
#define FREQ_LEN (1)
#define STOP_LEN (1)
#define PAT_LEN (1)
#define PATTERN_INFO_LEN (PAT_LEN+STOP_LEN+FREQ_LEN)
#define MAX_DEPTH (300)
//the index-array (ia) size is taken from the base item's path count and shared by every item in the table
#define IA_SIZE_BASE_ITEM (0)
//each item keeps its own index-array count in ia_num[]
#define IA_SIZE_EACH_ITEMS (1)
#define HT_IARRAY_LEN_PER_ITEM(n_node) (round_up_pow2((unsigned int)ceil(n_node * (1/HASH_LOAD_FACTOR))))
#define HT_IARRAY_LEN(n_fi, n_node) (n_fi * HT_IARRAY_LEN_PER_ITEM(n_node))
#define HT_IARRAY_SIZE(n_fi, n_node) (unsigned int)(HT_IARRAY_LEN(n_fi, n_node)* sizeof(unsigned int))
#define HT_SIZE(n_fi, n_node) (unsigned int)(4*sizeof(unsigned int) + 3 * n_fi *sizeof(unsigned int) + 2*HT_IARRAY_SIZE(n_fi, n_node))
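/* Byte size of one conditional hash table: a 4-word header (n_fi, ia_type, ia_size,
 * basepat_idx), three per-item arrays (items, supps, ia_num) and two hashed arrays
 * (node indices and node counts) of HT_IARRAY_LEN slots each. */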
//#define PAT_WITH_PATHID
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
void free_gpu_mem(void);
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int *d_dummy;
__host__ void gpu_dummy_alloc_thread(){
CUDA_CHECK_RETURN(hipMalloc((void**) &d_dummy, sizeof(int)));
}
int **h_pattern_ans;
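/* Constant-memory parameters used by the kernels: c_msup (minimum support),
 * c_num_fi (#frequent items), c_num_fpnode (#FP-tree nodes), c_num_res_vector
 * (64-bit words per pattern bitmap) and c_gtree_item_base (per-item base index
 * into the flattened GPU tree). */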
__constant__ unsigned int d_plen_level_c[MAX_DEPTH];//the length of a pattern of a level = plen_item[k]*num_pnode[k]
__constant__ unsigned int d_level_clen[MAX_DEPTH];
__constant__ unsigned long long c_fi_with_baseitem[64*63/2];
__constant__ unsigned int c_msup[1];
__constant__ unsigned int c_num_fi[1];
__constant__ unsigned int c_num_fpnode[1];
__constant__ unsigned int c_num_res_vector[1];
__constant__ unsigned int c_gtree_item_base[MAX_FI];
__host__ void alloc_gpu_cltree(CSTREE *cst){
int num_fpnode = cst->num_fpnode;
int num_fi = cst->cnt;
printf("gtree_buf size=%u\n",cst->gtree_size);
void *d_gtreebuf;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_gtreebuf, cst->gtree_size));
cst->d_gtree_itembase = (unsigned int*)d_gtreebuf;
cst->d_gtree = (GTREE_NODE*)(cst->d_gtree_itembase + (((cst->cnt+1)+2)&~1));
CUDA_CHECK_RETURN(hipHostMalloc((void**)&h_pattern_ans,sizeof(int*) * num_fpnode));
}
unsigned int result_size = 0;
void *h_res ;
#define NUM_THREAD_BUILD_TAB (512)
#define UNLOCK (0)
#define LOCK (1)
#define ITEM_LOCK (-1)
unsigned int cal_next_tab_size(const unsigned int *aux_in, const unsigned int num_entry_tab_in, const unsigned int *ia_num, unsigned int *tab_out_offset){
unsigned int next_tab_size = sizeof(unsigned int)*2;//total #fi + next_table_size
for(unsigned int i=0; i<num_entry_tab_in;i++){
tab_out_offset[i] = next_tab_size;
if(aux_in[i])
next_tab_size += HT_SIZE(i, ia_num[i]);
printf("tab_out_offset[%d]=%u\n", i,tab_out_offset[i]);
}
return next_tab_size; // the extra unit is for counting the total number of frequent items
}
#if 0
__device__ unsigned int find_bid_base_idx(unsigned int bid, const unsigned int* __restrict__ d_io){
const unsigned io_cnt = d_io[0];
const unsigned* __restrict__ io = d_io+1;
int find = 0;
int i;
for(i=1;i<io_cnt;i++){
if(bid<io[i]){
find =1;
break;
}
}
if(!find)
return io_cnt-1;
else
return i-1;
}
#endif
__forceinline__ __device__ unsigned int find_bid_base_idx_bsearch(unsigned int bid, const unsigned int* __restrict__ d_io){
const unsigned io_cnt = d_io[0];
const unsigned int* __restrict__ io = d_io+1;
unsigned int l=0, r = io_cnt-1, find = 0;
do{
unsigned int m = (l+r)/2;
// if(!m)
// return 0;
// else if(m == io_cnt-1)
// return io_cnt-1;
if(io[m]<=bid && bid<io[m+1]){
return m;
}else if(bid<io[m]){
r = m;
}else if(bid>=io[m+1]){
l = m+1;
}
}while(r>l);
return io_cnt-1;
}
#define EMPTY_SLOT ((unsigned)-1)
#define HASH_FULL ((unsigned)-2)
#ifndef DBG_HASH
#define DBG_HASH 0
#endif
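/* Quadratic-probing insert of a tree-node index into an item's hash slot array.
 * Returns EMPTY_SLOT when the node was newly inserted (its count initialised to val),
 * the node index itself when it was already present (count incremented by val),
 * or HASH_FULL when no free slot could be found. */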
__forceinline__ __device__ unsigned int hash_qprob_insert(const unsigned int nidx, const unsigned int num_slot, unsigned int* __restrict__ idx_tab,
unsigned int* __restrict__ cnt_tab, const unsigned int val, const int item){
int retry=0;
for(int i=0;i<num_slot;i++){
#if 0
unsigned int idx = (unsigned int)((nidx & (num_slot-1)) + 0.5*i + 0.5 *i*i) & (num_slot-1);
if(idx_tab[idx]==nidx){
atomicAdd_block(&cnt_tab[idx], val);
return nidx;
}else if(atomicCAS_block(&idx_tab[idx], EMPTY_SLOT, nidx) == EMPTY_SLOT){
//atomicAdd_block(&cnt_tab[idx], val);
atomicExch_block(&cnt_tab[idx], val); //init
return EMPTY_SLOT;
}
#endif
#if 1
//unsigned int idx = (unsigned int)(nidx % num_slot + 0.5*i + 0.5 *i*i) % num_slot;
unsigned int idx = (unsigned int)((nidx & (num_slot-1)) + 0.5*i + 0.5 *i*i) & (num_slot-1);
unsigned int ret = atomicCAS_block(&idx_tab[idx], EMPTY_SLOT, nidx);
if((ret == EMPTY_SLOT)){
#if DBG_HASH
printf("<%u, %u> HIT(TRY:%d) #m=%u add item:%d nid:%u at %u ret %u (%p) val:%d\n",blockIdx.x, threadIdx.x, retry, num_slot, item, nidx, idx, ret, &idx_tab[idx],val);
#endif
atomicExch_block(&cnt_tab[idx], val);
return ret;
}else if(ret == nidx){
atomicAdd_block(&cnt_tab[idx], val);
return ret;
}
retry++;
#if DBG_HASH
printf("<%u, %u> CONFLICT #m=%u add item:%d nid:%u at %u ret %u (%p)\n",blockIdx.x, threadIdx.x, num_slot, item, nidx, idx, ret, &idx_tab[idx]);
#endif
#endif
}
return HASH_FULL;
}
#define QHASH
__forceinline__ __device__ unsigned int hash_node_idx(const unsigned int nidx, const unsigned int num_slot, unsigned int* __restrict__ idx_tab,
unsigned int* __restrict__ cnt_tab, const unsigned int val, const int item){
#ifdef QHASH
return hash_qprob_insert(nidx, num_slot, idx_tab, cnt_tab, val, item);
#endif
}
__device__ void block_init_array(unsigned int* __restrict__ base, const unsigned int len, const unsigned int val){
unsigned int tid = threadIdx.x;
for(int i=tid; i<len; i+=blockDim.x){
base[i] = val;
}
}
#ifndef DBG_FPG_ITER
#define DBG_FPG_ITER 0
#endif
#define GLOBAL_TAB_HEADER_SIZE_BYTE (2 * sizeof(unsigned int))
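/* One block per (conditional table, frequent item) pair: the block reads its item's
 * row from the input hash table, emits the pattern (base pattern | item) with its
 * support into pat_raw/freq_raw when supp >= smin, and then, if the item can still
 * be extended, walks the ancestors of every hashed tree node to build the item's
 * conditional hash table in d_tab_out for the next iteration. */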
__global__ void kernel_fpg_iter_gtree(const void *d_tab_in, void *d_tab_out,
const unsigned long long* __restrict__ tab_in_offset, const unsigned long long* __restrict__ tab_out_offset,
const GTREE_NODE* __restrict__ gtree,
const unsigned int smin, const unsigned int* __restrict__ relatived_bid_in, const unsigned int* __restrict__ wo_remap_raw,
unsigned long long* __restrict__ pat_raw, unsigned int* __restrict__ freq_raw, const int max_item, const unsigned int* __restrict__ d_wide_tab_id){
 unsigned int tid = threadIdx.x; // local thread index; each (table, item) pair is handled by exactly one block
unsigned long long* __restrict__ pat_cnt = pat_raw;
unsigned long long* __restrict__ pat = pat_cnt+1;
unsigned int *freq = freq_raw;
//unsigned int bid_offset_idx = find_bid_base_idx(blockIdx.x, relatived_bid_in); //add head
//unsigned int bid_offset_idx = find_bid_base_idx_bsearch(blockIdx.x, relatived_bid_in);
unsigned int bid_offset_idx = d_wide_tab_id[blockIdx.x];
unsigned int bid_offset = relatived_bid_in[bid_offset_idx+1];
// unsigned int res = find_bid_base_idx_bsearch(blockIdx.x, relatived_bid_in);
// assert(bid_offset_idx==res);
const int rel_bid = blockIdx.x - bid_offset;
const unsigned long long* __restrict__ ro = tab_in_offset + 1;
const unsigned long long* __restrict__ wo = tab_out_offset + 1;
const unsigned int* __restrict__ wo_remap = wo_remap_raw + 1;
unsigned int* __restrict__ global_num_fi_out = (unsigned int* )d_tab_out;
unsigned int* __restrict__ global_next_tab_size_out = global_num_fi_out + 1;
const unsigned int* __restrict__ global_num_fi_in = (unsigned int* )d_tab_in;
const unsigned int* __restrict__ global_next_tab_size_in = global_num_fi_in + 1;
unsigned int* __restrict__ tab_out = (unsigned int*)((uintptr_t)d_tab_out + wo[wo_remap[blockIdx.x]]+GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned long long r_offset = ro[bid_offset_idx];
const unsigned int* __restrict__ tab_in = (unsigned int*)((uintptr_t)d_tab_in + r_offset +GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned int* __restrict__ n_fi = tab_in;
const unsigned int num_fi = *n_fi;
const unsigned int* __restrict__ ia_type = n_fi+1;
const unsigned int* __restrict__ ia_size = ia_type+1;
const unsigned int* __restrict__ basepat_idx = ia_size +1;
const unsigned int* __restrict__ items = basepat_idx +1;
const unsigned int* __restrict__ supps = items + num_fi;
const unsigned int* __restrict__ ia_num = supps + num_fi;
const unsigned int* __restrict__ ia_arrays; //= ia_num + n_node;
const unsigned int* __restrict__ node_counts;// = ia_arrays + n_node;
unsigned int item = items[rel_bid];
#if ENABLE_ASSERT
assert(item<max_item);
#endif
//assert(item<5);
unsigned int supp = supps[rel_bid];
unsigned int num_path = ia_num[rel_bid];
unsigned int num_try_path = HT_IARRAY_LEN_PER_ITEM(*ia_size);//round_up_pow2((unsigned)ceil((float)*ia_size / (float)HASH_LOAD_FACTOR));
unsigned int chunk_size = (unsigned)ceil((float)num_try_path/blockDim.x);
unsigned long long pat_idx;
#if 0
if(tid==0)
printf("<%u, %u, %u> item:%u supp:%u\n",blockIdx.x,rel_bid,tid, item,supp);
#endif
if(supp < smin){
return;// all threads of the block return
}
else{
//fill the pattern
if(tid == 0){
pat_idx = atomicAdd(pat_cnt, 1);
int pat_base = pat_idx * *c_num_res_vector;
int sub_idx = item>>6; // DIV 64
if(*basepat_idx == (unsigned int)-1){
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = 1ULL<<(item & 63);
else
pat[pat_base+i] = 0;
}
freq[pat_idx] = supp;
#if 0
printf("<%u, %u, %u> 1 item:%u pat_idx=%lu pat=0x%016llx freq:%u\n", blockIdx.x,rel_bid,tid, item, pat_idx,pat[pat_idx],freq[pat_idx]);
#endif
#if 0
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = 1ULL<<(item & 63);
else
pat[pat_base+i] = 0;
}
#endif
}
else{
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = pat[*basepat_idx * *c_num_res_vector + i] | 1ULL<<(item & 63);
else
pat[pat_base+i] = pat[*basepat_idx * *c_num_res_vector + i];//copy
}
freq[pat_idx] = supp;
#if 0
printf("<%u, %u, %u> 2 basepat_idx=%u pat[*basepat_idx]=0x%016llx item:%u pat_idx=%lu pat=0x%016llx freq:%u\n",
blockIdx.x,rel_bid,tid, *basepat_idx, pat[*basepat_idx] , item, pat_idx, pat[pat_idx], freq[pat_idx] );
#endif
}
if(item)
atomicAdd(global_num_fi_out, item);
}
}
#if DBG_FPG_ITER
__syncthreads();
#endif
//tab_out_offset[0] --> no next run
if(!item )
return;
if(!num_fi)
return;
if(!tab_out_offset[0])
return;
#if 0
if(tid==0)
printf("<%d, %d, %d> bid_offset:%u d_tab_in:%p tab_in:%p d_tab_out:%p tab_out:%p wo_remap=%u wo(remap)=%u(0x%x)\n",blockIdx.x, rel_bid, tid, bid_offset, d_tab_in,tab_in, d_tab_out, tab_out, wo_remap[blockIdx.x],wo[wo_remap[blockIdx.x]] );
#endif
ia_arrays = ia_num + *n_fi;
node_counts = ia_arrays + HT_IARRAY_LEN(*n_fi, *ia_size);
//for new table
unsigned int* __restrict__ new_n_fi = tab_out;
 *new_n_fi = item; // the new conditional table covers items 0 .. item-1
unsigned int* __restrict__ new_ia_type = new_n_fi+1;
*new_ia_type = IA_SIZE_BASE_ITEM;
unsigned int* __restrict__ new_ia_size = new_ia_type+1;
*new_ia_size = num_path;
unsigned int* __restrict__ new_basepat_idx = new_ia_size +1;
unsigned int* __restrict__ new_items = new_basepat_idx +1;
unsigned int* __restrict__ new_supps = new_items + *new_n_fi;
unsigned int* __restrict__ new_item_ia_num = new_supps + *new_n_fi;
unsigned int* __restrict__ new_item_ia_arrays = new_item_ia_num + *new_n_fi;
unsigned int* __restrict__ new_node_counts = new_item_ia_arrays + HT_IARRAY_LEN(*new_n_fi, *new_ia_size);
unsigned int new_iarray_len_per_item = HT_IARRAY_LEN_PER_ITEM(*new_ia_size);
unsigned int new_iarray_len = HT_IARRAY_LEN(*new_n_fi, *new_ia_size);
//unsigned int strip_size = max((unsigned int)ceilf(((float)new_iarray_len)/blockDim.x),(unsigned int)blockDim.x);
//block_init_array(new_item_ia_arrays, new_iarray_len, strip_size, EMPTY_SLOT);
block_init_array(new_item_ia_arrays, new_iarray_len, EMPTY_SLOT);
// block_init_array(new_node_counts, new_iarray_len, 0);
// if(tid==0)
// memset(new_item_ia_arrays, 0xFF, new_iarray_len*sizeof(int));
if(tid==0)
*new_basepat_idx = pat_idx;
for(int i= tid; i<item; i+=blockDim.x){
new_items[i] = i;
new_supps[i] = 0;
new_item_ia_num[i] = 0;
}
__syncthreads();//necessary id blocksize>32
#if DBG_FPG_ITER
if(tid==0)
printf("P <%u, %u> item:%d num_path:%u\n",blockIdx.x, tid, item, num_path);
#endif
if(tid<min(blockDim.x, num_try_path)){
#if 0
printf("<%u, %u> item:%d supp:%d\n",blockIdx.x, tid, item,supp);
#endif
if(supp<smin){
*new_n_fi = 0;
return;
}
#if 0
printf("<%u, %u> item:%d try path %d ~% d\n",blockIdx.x, tid, item,chunk_size*tid, chunk_size*(tid +1));
#endif
//for(unsigned int path_idx=chunk_size*tid ; (path_idx<chunk_size*(tid +1)) && (path_idx<num_try_path); path_idx++){
for(unsigned int path_idx=tid ; (path_idx<num_try_path); path_idx+=blockDim.x){
unsigned int item_ia_idx;
//get base index in its index array
item_ia_idx = num_try_path * item + path_idx;
unsigned int start_supp = node_counts[item_ia_idx];
unsigned int start_idx = ia_arrays[item_ia_idx];
#if 0
if(start_idx != EMPTY_SLOT)
printf("<b:%u, rb:%u, tid:%u> path_idx:%u(m:%u #p=%u #fp=%u) ia_idx:%u start_idx:%u start_supp:%u item_ia_idx:%u\n",blockIdx.x, rel_bid, tid, path_idx, num_try_path, num_path,*s_num_finished_path,item_ia_idx, start_idx, start_supp, item_ia_idx);
#endif
if(start_idx == EMPTY_SLOT)
continue;//next path
#if ENABLE_ASSERT
assert(start_supp>0);
#endif
const GPU_TREE_NODE *n;
n = >ree[start_idx];
int pitem = n->pitem;
unsigned int pidx;
// printf("1st pitem=%d\n", pitem);
while(pitem!=ROOT_ITEM){
pidx = c_gtree_item_base[pitem] + n->index;
n = >ree[pidx];
#if 0
printf("blk:%d(rel_bid:%d) tid:%d idx:%d cur_item=%d pidx=%d\n", blockIdx.x, rel_bid, tid, idx, cur_item,pidx );
#endif
//search array index
unsigned int tmp_item_ia_base = new_iarray_len_per_item * pitem ;//for filling new table's IA
// printf("<%d> base_vec:0x%016llx pathid:%d cur_item:%d tmp_item_ia_base:%d \n", tid, base_vec,path_idx,cur_item,tmp_item_ia_base);
//assert(cur_item< item);
atomicAdd_block(&new_supps[pitem], start_supp);
//hash nodes to assigned item
unsigned int hash_ret;
hash_ret = hash_node_idx(pidx, new_iarray_len_per_item, new_item_ia_arrays + tmp_item_ia_base, new_node_counts+tmp_item_ia_base, start_supp,pitem );
if(hash_ret == EMPTY_SLOT){
#if 1
// printf("blk:%d(rel_bid:%d) tid:%d item:%u @%p ++\n",blockIdx.x, rel_bid, tid, cur_item, &new_item_ia_num[cur_item]);
#endif
atomicAdd_block(&new_item_ia_num[pitem],1);
}
else
assert(hash_ret!=HASH_FULL);
pitem = n->pitem;
}
}
}
//__syncthreads();
//global_next_tab_size
#if 0
if(tid < *n_fi){
unsigned int subtab_size = gpu_cal_next_tab_size(items, ia_num);
atomicAdd(global_next_tab_size, subtab_size);
}
if(tid==0 && blockIdx.x==0)
atomicAdd(global_next_tab_size, 2*sizeof(unsigned int)); //total #FI + next_tab_size
#endif
}
/*
* chess support 30%
* #define MAX_RO_SIZE (50<<20)
* #define MAX_WO_SIZE (50<<20)
* #define MAX_IO_SIZE (50<<20)
* #define MAX_PAT_SIZE (200<<20)
* #define MAX_FREQ_SIZE (100<<20)
* #define MAX_REMAP (5000000-1)
*
* */
#define MAX_RO_SIZE (50<<20)
#define MAX_WO_SIZE (50<<20)
#define MAX_IO_SIZE (50<<20)
#define MAX_PAT_SIZE (200<<20)
#define MAX_FREQ_SIZE (100<<20)
#define MAX_REMAP (1000000-1)
#define MAX_REMAP_SIZE ((MAX_REMAP+1)*sizeof(unsigned int))
#ifndef DBG_CAL_OFFSET
#define DBG_CAL_OFFSET 0
#endif
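/* One thread per (table, item) entry of the current level: caches the owning table
 * index in d_wide_tab_id, looks up the entry's support and, when it is frequent and
 * not item 0, reserves a slot in the remap array and records the byte size (new_wo)
 * and item count (new_io) of the conditional table that kernel_fpg_iter_gtree will
 * emit for it. */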
__global__ void kernel_cal_offset(const unsigned int* __restrict__ tab_in,
unsigned int* __restrict__ pre_idx_offset_raw, unsigned int* __restrict__ new_idx_offset_raw,
unsigned long long* __restrict__ pre_wo_raw, unsigned long long* __restrict__ new_wo_raw,
unsigned int* __restrict__ remap_raw, const unsigned int msup, const unsigned max_item, unsigned int* __restrict__ d_wide_tab_id){ //, unsigned int* __restrict__ new_ro_raw){
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; // a thread = a block
unsigned int *remap_size = remap_raw;
unsigned int *remap = remap_size + 1;
assert(*remap_size<MAX_REMAP);
unsigned int* __restrict__ idx_offset_size = pre_idx_offset_raw;
const unsigned int* __restrict__ idx_offset= idx_offset_size + 1;
const unsigned long long* __restrict__ pre_wo_offset= pre_wo_raw + 1;
const unsigned int* __restrict__ total_num_fi = tab_in;
const unsigned int* __restrict__ next_tab_size = total_num_fi+1;
unsigned long long* __restrict__ new_wo_size = new_wo_raw;
unsigned long long* __restrict__ new_wo = new_wo_size+1;
unsigned int* __restrict__ new_io_size = new_idx_offset_raw;
unsigned int* __restrict__ new_io = new_io_size+1;
const unsigned int* __restrict__ tab;
if(tid < *total_num_fi){
int tab_idx = find_bid_base_idx_bsearch(tid, pre_idx_offset_raw);
d_wide_tab_id[tid] = tab_idx;
#if DBG_CAL_OFFSET
printf("<%u> tab_idx=%u wo=%llu\n", tid,tab_idx,pre_wo_offset[tab_idx]);
#endif
//new_ro[tid] = pre_wo_offset[tab_idx];
tab = (unsigned int*)((uintptr_t)tab_in + pre_wo_offset[tab_idx] + GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned int* __restrict__ n_fi = tab;
const unsigned int num_fi = *n_fi;
const unsigned int* __restrict__ ia_type = n_fi+1;
const unsigned int* __restrict__ ia_size = ia_type+1;
const unsigned int* __restrict__ basepat_idx = ia_size +1;
const unsigned int* __restrict__ items = basepat_idx +1;
const unsigned int* __restrict__ supps = items + num_fi;
const unsigned int* __restrict__ ia_num = supps + num_fi;
// const unsigned int* __restrict__ ia_arrays; //= ia_num + n_node;
// const unsigned int* __restrict__ node_counts;// = ia_arrays + n_node;
unsigned int rel_tid = tid -idx_offset[tab_idx];
unsigned int item = items[rel_tid];
unsigned int supp = supps[rel_tid];
#if DBG_CAL_OFFSET
printf("<%u, rel_tid:%u> item:%u supp:%d\n",tid,rel_tid, item, supp);
#endif
assert(item<max_item);
if(item && (supp >= msup)){
//unsigned int num_path = ia_num[rel_tid];
//unsigned int num_path = *ia_size;
unsigned int num_path;
if(*ia_type==IA_SIZE_EACH_ITEMS)
num_path = ia_num[rel_tid];
else
num_path = *ia_size;
remap[tid] = atomicAdd(remap_size, 1);
//#if DBG_CAL_OFFSET
// printf("<%u, rel_tid:%u> item:%u remap=%u\n",tid,rel_tid, item,remap[tid]);
//#endif
//*new_wo_size = *total_num_fi;
new_wo[remap[tid]] = HT_SIZE(item, num_path);
#if DBG_CAL_OFFSET
printf("<%u, rel_tid:%u> item:%u num_path=%u new_wo[%u]=%llu(0x%X) /%llu MB \n",tid,rel_tid, item,num_path,remap[tid],new_wo[remap[tid]],new_wo[remap[tid]],new_wo[remap[tid]]>>20);
#endif
//*new_io_size = *total_num_fi;
new_io[remap[tid]] = item;
// *new_ro_size = *total_num_fi;
atomicAdd(new_wo_size, 1);
atomicAdd(new_io_size, 1);
}
}
}
//int max_node_item = 0;
#define DEBUG_FINAL_GET_TAB_SIZE 0
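// Sum the per-table byte sizes (CUB reduction) to obtain the total size of the next level's tables.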
__host__ void get_num_next_tabs(unsigned long long *d_tmp_wo_raw, unsigned num, unsigned long long *res){
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
//unsigned long long *d_total;
//hipMalloc(&d_total, sizeof(unsigned long long));
#if DEBUG_FINAL_GET_TAB_SIZE
unsigned long long *wobuf_tmp = (unsigned long long*) malloc(MAX_WO_SIZE);
CUDA_CHECK_RETURN(hipMemcpy(wobuf_tmp, d_tmp_wo_raw, MAX_WO_SIZE, hipMemcpyDeviceToHost));//copy the context with counter
#endif
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp_wo_raw+1, d_tmp_wo_raw, num);
// Allocate temporary storage
hipMalloc(&d_temp_storage, temp_storage_bytes);
// Run sum-reduction
hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp_wo_raw+1, d_tmp_wo_raw, num);
#if DEBUG_FINAL_GET_TAB_SIZE
CUDA_CHECK_RETURN(hipMemcpy(wobuf_tmp, d_tmp_wo_raw, MAX_WO_SIZE, hipMemcpyDeviceToHost));//copy the context with counter
#endif
 // the leading element of d_tmp_wo_raw is borrowed to hold the reduction result,
 // i.e. the total byte size of the next level's tables
 CUDA_CHECK_RETURN(hipMemcpy(res, d_tmp_wo_raw, sizeof(unsigned long long), hipMemcpyDeviceToHost));
 CUDA_CHECK_RETURN(hipFree(d_temp_storage));
}
#define DEBUG_FINAL_WO 0
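// Exclusive prefix sum (CUB) over the per-table sizes, turning them into byte offsets for each next-level table.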
__host__ void final_next_write_offset(unsigned long long *d_dst, unsigned long long *d_src, int num)
{
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
#if DEBUG_FINAL_WO
unsigned long long *wobuf_tmp = (unsigned long long*) malloc(MAX_WO_SIZE);
CUDA_CHECK_RETURN(hipMemcpy(wobuf_tmp, d_dst-1, MAX_WO_SIZE, hipMemcpyDeviceToHost));//copy the context with counter
#endif
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
// Allocate temporary storage
hipMalloc(&d_temp_storage, temp_storage_bytes);
// Run exclusive prefix sum
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
#if DEBUG_FINAL_WO
CUDA_CHECK_RETURN(hipMemcpy(wobuf_tmp, d_dst-1, MAX_WO_SIZE, hipMemcpyDeviceToHost));//copy the context with counter
#endif
 CUDA_CHECK_RETURN(hipFree(d_temp_storage));
}
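// Exclusive prefix sum (CUB) over the per-table item counts, giving each next-level table its starting block index.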
__host__ void fianl_next_index_offset(unsigned int *d_dst, unsigned int *d_src, int num)
{
//prefix sum for IO
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
// Allocate temporary storage
hipMalloc(&d_temp_storage, temp_storage_bytes);
// Run exclusive prefix sum
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
#if 0
unsigned int *iobuf_tmp = (unsigned int*) malloc(MAX_IO_SIZE);
CUDA_CHECK_RETURN(hipMemcpy(iobuf_tmp, d_dst-1, MAX_IO_SIZE, hipMemcpyDeviceToHost));//copy the context with counter
#endif
 CUDA_CHECK_RETURN(hipFree(d_temp_storage));
}
__host__ long cuda_main(CSTREE *cst, SUPP smin)
{
int num_fpnode = cst->num_fpnode;
int num_fi = cst->cnt;
cst->real_max_depth += 1;
printf("worst max_depth =%d real max_dekpth=%d\n",cst->max_depth,cst->real_max_depth);
alloc_gpu_cltree(cst);
CUDA_CHECK_RETURN(hipMemcpy(cst->d_gtree_itembase, cst->h_gtree_itembase,cst->gtree_size, hipMemcpyHostToDevice));
assert(MAX_DEPTH > cst->real_max_depth);
//CUDA_CHECK_RETURN(hipMemcpyToSymbol (d_level_clen, cst->level_clen, (cst->real_max_depth) *sizeof(unsigned int )));
CUDA_CHECK_RETURN(hipMemcpyToSymbol (c_msup, &smin, sizeof(unsigned int)));
CUDA_CHECK_RETURN(hipMemcpyToSymbol (c_num_fi, &cst->cnt, sizeof(unsigned int )));
CUDA_CHECK_RETURN(hipMemcpyToSymbol (c_num_fpnode, &cst->num_fpnode, sizeof(unsigned int )));
unsigned int num_res_vector = ((cst->cnt + 63) & ~63) >> 6;
printf("cst->cnt:%d num_res_vector=%u\n", cst->cnt, num_res_vector);
CUDA_CHECK_RETURN(hipMemcpyToSymbol (c_num_res_vector, &num_res_vector, sizeof(unsigned int )));
CUDA_CHECK_RETURN(hipMemcpyToSymbol (c_gtree_item_base, cst->h_gtree_itembase, cst->cnt*sizeof(unsigned int )));
void *global_htab_buf;
void *d_global_htab_buf;
int max_num_node = 0;
for(int i=0; i< cst->cnt;i++){
if(cst->heads[i].cnt > max_num_node)
max_num_node = cst->heads[i].cnt;
}
unsigned int tab_size = HT_SIZE(cst->cnt, max_num_node) + 2 * sizeof(unsigned int);
CUDA_CHECK_RETURN(hipHostMalloc((void**) &global_htab_buf, tab_size));
memset(global_htab_buf, 0, tab_size);
CUDA_CHECK_RETURN(hipMalloc((void**) &d_global_htab_buf, tab_size));
CUDA_CHECK_RETURN(hipMemset(d_global_htab_buf, 0, tab_size));
printf("global htb %p ~ %p\n", global_htab_buf, global_htab_buf+ tab_size);
printf("d_global htb %p ~ %p\n", d_global_htab_buf, (unsigned long long)d_global_htab_buf+ tab_size);
unsigned int *n_global_fi = (unsigned int*) global_htab_buf;
*n_global_fi = cst->cnt;
unsigned int *next_table_size = n_global_fi + 1;
unsigned int *n_fi = next_table_size+1;
unsigned int *type_ia_size = n_fi+1;
unsigned int *ia_size = type_ia_size+1;
unsigned int *basepat_idx = ia_size+1;
 *basepat_idx = -1; // -1 marks "no base pattern" (the root header table)
unsigned int *items = basepat_idx+1;
unsigned int *supps = items + cst->cnt;
unsigned int *ia_num = supps + cst->cnt;
unsigned int *ia_arrays = ia_num + cst->cnt;
memset(ia_arrays, -1, HT_IARRAY_LEN(cst->cnt, max_num_node)*sizeof(unsigned));
unsigned int *node_counts = ia_arrays + HT_IARRAY_LEN(cst->cnt, max_num_node);
*type_ia_size = IA_SIZE_EACH_ITEMS;
*n_fi = cst->cnt;
*ia_size = max_num_node;
//fill 1st htb
for(int i=0;i< cst->cnt;i++){
static unsigned int pre_node = 0;
items[i]=i;
supps[i]=cst->heads[i].supp;
ia_num[i] = cst->heads[i].cnt;
for(int j=0;j<cst->heads[i].cnt; j++){
ia_arrays[pre_node+j] = cst->h_gtree_itembase[i]+j; //index in the gtree
node_counts[pre_node+j] = cst->h_gtree[cst->h_gtree_itembase[i]+j].freq;
}
pre_node += HT_IARRAY_LEN_PER_ITEM(max_num_node);
}
CUDA_CHECK_RETURN(hipMemcpy(d_global_htab_buf, global_htab_buf, tab_size, hipMemcpyHostToDevice));
unsigned int *relatived_id_in, *d_relatived_id_in;
unsigned long long next_tab_size;
void *d_tab_in, *d_tab_out;
unsigned long long *d_tab_in_offset, *d_tab_out_offset;
unsigned int *tab_out;
d_tab_in = d_global_htab_buf;
unsigned int num_result_entry = 0;
unsigned int *global_num_fi;
CUDA_CHECK_RETURN(hipHostMalloc((void**) &global_num_fi, sizeof(unsigned int)));
void *buf_pool;
unsigned int total_buf_size = (MAX_WO_SIZE+MAX_IO_SIZE)*2 + MAX_RO_SIZE + MAX_REMAP_SIZE;
CUDA_CHECK_RETURN(hipMalloc((void**) &buf_pool, total_buf_size));
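 // Carve the single pool into the double-buffered index-offset (IO) and write-offset (WO)
 // regions plus the remap and read-offset buffers used by the two kernels.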
unsigned int *d_idx_offset_raw, *d_tmp_idx_offset_raw, *d_remap_raw, *d_ro_raw;
unsigned long long *d_wo_raw, *d_tmp_wo_raw;
d_idx_offset_raw = (unsigned int*)buf_pool;
d_tmp_idx_offset_raw = d_idx_offset_raw + MAX_IO_SIZE/sizeof(unsigned int);
d_wo_raw = (unsigned long long*)(d_tmp_idx_offset_raw + MAX_IO_SIZE/sizeof(unsigned long long));
d_tmp_wo_raw = (unsigned long long*)(d_wo_raw + MAX_WO_SIZE/sizeof(unsigned long long));
d_remap_raw = (unsigned int*)(d_tmp_wo_raw + MAX_WO_SIZE/sizeof(unsigned long long));
d_ro_raw = d_remap_raw + MAX_REMAP_SIZE/sizeof(unsigned long long);
//initial idx_offset
unsigned int *init_idx_offset = (unsigned int*) malloc(MAX_IO_SIZE);
init_idx_offset[0] = 1;
init_idx_offset[1] = 0;
unsigned long long *init_wo_raw = (unsigned long long*) malloc(MAX_IO_SIZE);
init_wo_raw[0]=1;
init_wo_raw[1] = 0; //kernel will consider the global header
CUDA_CHECK_RETURN(hipMemcpy(d_idx_offset_raw, init_idx_offset, 2* sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_wo_raw, init_wo_raw, 2 * sizeof(unsigned long long), hipMemcpyHostToDevice));
d_tab_in_offset = d_wo_raw;
d_tab_out_offset = d_tmp_wo_raw;
unsigned int *d_bid_offset_raw = d_idx_offset_raw;
unsigned int *d_bid_offset_next_raw = d_tmp_idx_offset_raw;
unsigned int *d_write_offset_remap = d_remap_raw;
*global_num_fi = cst->cnt;
void *d_res;
unsigned long long *d_pat;
unsigned int *d_freq;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_res, MAX_PAT_SIZE+ MAX_FREQ_SIZE));
d_pat = (unsigned long long*)d_res;
d_freq = (unsigned int*)((uintptr_t)d_pat + MAX_PAT_SIZE);
CUDA_CHECK_RETURN(hipMemset(d_pat, 0, sizeof(unsigned long long)));
int num_wo;
int k=1;
size_t old_global_num_fi = 0, new_global_num_fi = *global_num_fi;
size_t old_tab_in_size = 0;
void *d_old_tab_in=0;
do{
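 // Per-level pipeline: (1) kernel_cal_offset sizes the next level's tables,
 // (2) prefix sums turn the sizes into write offsets, (3) kernel_fpg_iter_gtree
 // grows the patterns and fills the new tables, then the in/out buffers are swapped.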
printf("== %d-item set==\n",k++);
printf("kernel_cal_offset\n");
unsigned int *d_wide_tab_id;
if(new_global_num_fi > old_global_num_fi){
CUDA_CHECK_RETURN(hipMalloc((void**) &d_wide_tab_id, new_global_num_fi * sizeof(unsigned int)));
//printf("new d_wide_tab_id:%llu\n",new_global_num_fi);
old_global_num_fi = new_global_num_fi;
}else{
//printf("reuse d_wide_tab_id:%llu\n",old_global_num_fi);
}
CUDA_CHECK_RETURN(hipMemset(d_write_offset_remap, 0, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(hipMemset(d_bid_offset_next_raw, 0, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(hipMemset(d_tab_out_offset, 0, sizeof(unsigned int)));
hipLaunchKernelGGL(( kernel_cal_offset), dim3(ceil(new_global_num_fi/128.0)),dim3(128), 0, 0, (unsigned int*)d_tab_in,
d_bid_offset_raw, d_bid_offset_next_raw,
d_tab_in_offset, d_tab_out_offset,
d_write_offset_remap, smin, cst->cnt, d_wide_tab_id);
CUDA_CHECK_RETURN(hipMemcpy(&num_wo, d_tab_out_offset, sizeof(unsigned int), hipMemcpyDeviceToHost));
//printf("#tab in next run : %u(size:%uKB)\n",num_wo, (num_wo*sizeof(unsigned long long))>>10);
get_num_next_tabs(d_tab_out_offset, num_wo, &next_tab_size);
printf("next_tab_size in next run : %lluMB\n",next_tab_size>>20);
if(num_wo){
//final_next_write_offset(d_wo_raw+1, d_tmp_wo_raw+1, num_wo);
final_next_write_offset(d_tab_out_offset+1, d_tab_out_offset+1, num_wo);
//get_num_next_tabs(d_tmp_wo_raw, num_wo, &next_tab_size);
next_tab_size += sizeof(unsigned int)*2;
// count size of the next table and fill the tab_out_offset
//CUDA_CHECK_RETURN(hipHostMalloc((void**) &tab_out, next_tab_size));
if(next_tab_size>old_tab_in_size){
if(d_old_tab_in){
//printf("free d_old_tab_in:%p\n",d_old_tab_in);
CUDA_CHECK_RETURN(hipFree(d_old_tab_in));
}
CUDA_CHECK_RETURN(hipMalloc((void**) &d_tab_out, next_tab_size));
// printf("d_tab_in:0x%p new d_tab_out:%p(%lluMB)\n",d_tab_in, d_tab_out, next_tab_size>>20);
}else{
d_tab_out = d_old_tab_in;
// printf("d_tab_in:0x%p reuse d_tab_out = d_old_tab_in:%p(%lluMB)\n",d_tab_in, d_tab_out, old_tab_in_size>>20);
}
// printf("num_wo=%u next_tab_size=%u(%p~%p)\n",num_wo, next_tab_size, d_tab_out, (uintptr_t)d_tab_out + next_tab_size);
//CUDA_CHECK_RETURN(hipMemset(d_tab_out, 0, next_tab_size));
CUDA_CHECK_RETURN(hipMemset(d_tab_out, 0, 8));// clear only the global counters; each block initializes its own table region by itself
}
printf("kernel_fpg_iter\n");
hipLaunchKernelGGL(( kernel_fpg_iter_gtree), dim3(new_global_num_fi),dim3(512), 0, 0, d_tab_in,d_tab_out,d_tab_in_offset,d_tab_out_offset,
cst->d_gtree, smin, d_bid_offset_raw, d_write_offset_remap, d_pat, d_freq, cst->cnt, d_wide_tab_id);
//printf("%s\n",hipGetErrorString(hipGetLastError()));
if(!num_wo)
break;
// CUDA_CHECK_RETURN(hipMemcpy(tab_out, d_tab_out, next_tab_size, hipMemcpyDeviceToHost));//for debug
CUDA_CHECK_RETURN(hipMemcpy(global_num_fi, d_tab_out, sizeof(unsigned int), hipMemcpyDeviceToHost));
printf("global_num_fi=%u\n",*global_num_fi);
new_global_num_fi = *global_num_fi;
void *d_ptmp;
//swap input and output tab
// CUDA_CHECK_RETURN(hipFree(d_tab_in));
if(new_global_num_fi> old_global_num_fi)
CUDA_CHECK_RETURN(hipFree(d_wide_tab_id));
d_old_tab_in = d_tab_in;
old_tab_in_size = tab_size;
d_tab_in = d_tab_out;
tab_size = next_tab_size;
fianl_next_index_offset(d_bid_offset_next_raw+1,d_bid_offset_next_raw+1,num_wo);
d_ptmp = d_bid_offset_raw;
d_bid_offset_raw = d_bid_offset_next_raw;
d_bid_offset_next_raw = (unsigned int*)d_ptmp;
//swap WO buf
d_ptmp = d_tab_in_offset;
d_tab_in_offset = d_tab_out_offset;
d_tab_out_offset = (unsigned long long*)d_ptmp;
}while(num_wo);
/*
unsigned int *tab_out;
CUDA_CHECK_RETURN(hipHostMalloc((void**) &tab_out, next_tab_size));
CUDA_CHECK_RETURN(hipMemcpy(tab_out, d_tab_out, next_tab_size, hipMemcpyDeviceToHost));
*/
//hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipHostMalloc((void**) &h_res, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(hipMemcpy(h_res, d_res, sizeof(unsigned long long), hipMemcpyDeviceToHost));
unsigned long long *h_pat = (unsigned long long*)h_res;
printf("CUDA #pat = %llu\n",h_pat[0]);
return (long) h_res;
}
void free_gpu_mem()
{
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| 4d2ff4269450198982b157454831fcafa608c8af.cu | #include <thrust/sort.h>
#include <thrust/random.h>
#include <thrust/device_ptr.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thread>
#include <thrust/scan.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include "fpgrowth.h"
#include <assert.h>
#include <sys/time.h>
#include <cub/cub.cuh>
__device__ __host__ unsigned int round_up_pow2(unsigned int v){
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
#define ENABLE_ASSERT 0
#define HASH_LOAD_FACTOR (1.0f)
#define NULL_NODE (NULL)
#define STOP_NODE (0xFFFFFFFF)
#define SPLIT_NODE (-2)
#define FREQ_LEN (1)
#define STOP_LEN (1)
#define PAT_LEN (1)
#define PATTERN_INFO_LEN (PAT_LEN+STOP_LEN+FREQ_LEN)
#define MAX_DEPTH (300)
//the ia size is a single value taken from the base item (one shared size for the whole table)
#define IA_SIZE_BASE_ITEM (0)
//the ia size is kept per item (ia_num[] holds each item's own count)
#define IA_SIZE_EACH_ITEMS (1)
#define HT_IARRAY_LEN_PER_ITEM(n_node) (round_up_pow2((unsigned int)ceil(n_node * (1/HASH_LOAD_FACTOR))))
#define HT_IARRAY_LEN(n_fi, n_node) (n_fi * HT_IARRAY_LEN_PER_ITEM(n_node))
#define HT_IARRAY_SIZE(n_fi, n_node) (unsigned int)(HT_IARRAY_LEN(n_fi, n_node)* sizeof(unsigned int))
#define HT_SIZE(n_fi, n_node) (unsigned int)(4*sizeof(unsigned int) + 3 * n_fi *sizeof(unsigned int) + 2*HT_IARRAY_SIZE(n_fi, n_node))
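/*
 * Layout of one conditional table, as read by the kernels below (HT_SIZE
 * accounts for exactly these fields):
 *   n_fi, ia_type, ia_size, basepat_idx              -- 4 unsigned int header
 *   items[n_fi], supps[n_fi], ia_num[n_fi]           -- 3 * n_fi unsigned int
 *   ia_arrays[HT_IARRAY_LEN(n_fi, n_node)]           -- hashed gtree node indices
 *   node_counts[HT_IARRAY_LEN(n_fi, n_node)]         -- support count per hashed node
 */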
//#define PAT_WITH_PATHID
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
void free_gpu_mem(void);
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int *d_dummy;
__host__ void gpu_dummy_alloc_thread(){
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_dummy, sizeof(int)));
}
int **h_pattern_ans;
__constant__ unsigned int d_plen_level_c[MAX_DEPTH];//the length of a pattern of a level = plen_item[k]*num_pnode[k]
__constant__ unsigned int d_level_clen[MAX_DEPTH];
__constant__ unsigned long long c_fi_with_baseitem[64*63/2];
__constant__ unsigned int c_msup[1];
__constant__ unsigned int c_num_fi[1];
__constant__ unsigned int c_num_fpnode[1];
__constant__ unsigned int c_num_res_vector[1];
__constant__ unsigned int c_gtree_item_base[MAX_FI];
__host__ void alloc_gpu_cltree(CSTREE *cst){
int num_fpnode = cst->num_fpnode;
int num_fi = cst->cnt;
printf("gtree_buf size=%u\n",cst->gtree_size);
void *d_gtreebuf;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_gtreebuf, cst->gtree_size));
cst->d_gtree_itembase = (unsigned int*)d_gtreebuf;
cst->d_gtree = (GTREE_NODE*)(cst->d_gtree_itembase + (((cst->cnt+1)+2)&~1));
CUDA_CHECK_RETURN(cudaMallocHost((void**)&h_pattern_ans,sizeof(int*) * num_fpnode));
}
unsigned int result_size = 0;
void *h_res ;
#define NUM_THREAD_BUILD_TAB (512)
#define UNLOCK (0)
#define LOCK (1)
#define ITEM_LOCK (-1)
unsigned int cal_next_tab_size(const unsigned int *aux_in, const unsigned int num_entry_tab_in, const unsigned int *ia_num, unsigned int *tab_out_offset){
unsigned int next_tab_size = sizeof(unsigned int)*2;//total #fi + next_table_size
for(unsigned int i=0; i<num_entry_tab_in;i++){
tab_out_offset[i] = next_tab_size;
if(aux_in[i])
next_tab_size += HT_SIZE(i, ia_num[i]);
printf("tab_out_offset[%d]=%u\n", i,tab_out_offset[i]);
}
return next_tab_size; // the extra unit is for counting the total number of frequent items
}
#if 0
__device__ unsigned int find_bid_base_idx(unsigned int bid, const unsigned int* __restrict__ d_io){
const unsigned io_cnt = d_io[0];
const unsigned* __restrict__ io = d_io+1;
int find = 0;
int i;
for(i=1;i<io_cnt;i++){
if(bid<io[i]){
find =1;
break;
}
}
if(!find)
return io_cnt-1;
else
return i-1;
}
#endif
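/* Binary search over the prefix-summed offsets: returns the entry m with
 * io[m] <= bid < io[m+1], or the last entry when bid falls beyond it. */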
__forceinline__ __device__ unsigned int find_bid_base_idx_bsearch(unsigned int bid, const unsigned int* __restrict__ d_io){
const unsigned io_cnt = d_io[0];
const unsigned int* __restrict__ io = d_io+1;
unsigned int l=0, r = io_cnt-1, find = 0;
do{
unsigned int m = (l+r)/2;
// if(!m)
// return 0;
// else if(m == io_cnt-1)
// return io_cnt-1;
if(io[m]<=bid && bid<io[m+1]){
return m;
}else if(bid<io[m]){
r = m;
}else if(bid>=io[m+1]){
l = m+1;
}
}while(r>l);
return io_cnt-1;
}
#define EMPTY_SLOT ((unsigned)-1)
#define HASH_FULL ((unsigned)-2)
#ifndef DBG_HASH
#define DBG_HASH 0
#endif
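/*
 * Quadratic-probing insert into the open-addressed per-item hash table.
 * Probe i visits slot (nidx + 0.5*i + 0.5*i*i) mod num_slot, i.e. offsets
 * 0, 1, 3, 6, 10, ... (num_slot is a power of two, so the mod is a mask).
 * Returns EMPTY_SLOT when nidx is inserted for the first time, nidx when it
 * was already present (its count is accumulated), or HASH_FULL if no free
 * slot could be found.
 */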
__forceinline__ __device__ unsigned int hash_qprob_insert(const unsigned int nidx, const unsigned int num_slot, unsigned int* __restrict__ idx_tab,
unsigned int* __restrict__ cnt_tab, const unsigned int val, const int item){
int retry=0;
for(int i=0;i<num_slot;i++){
#if 0
unsigned int idx = (unsigned int)((nidx & (num_slot-1)) + 0.5*i + 0.5 *i*i) & (num_slot-1);
if(idx_tab[idx]==nidx){
atomicAdd_block(&cnt_tab[idx], val);
return nidx;
}else if(atomicCAS_block(&idx_tab[idx], EMPTY_SLOT, nidx) == EMPTY_SLOT){
//atomicAdd_block(&cnt_tab[idx], val);
atomicExch_block(&cnt_tab[idx], val); //init
return EMPTY_SLOT;
}
#endif
#if 1
//unsigned int idx = (unsigned int)(nidx % num_slot + 0.5*i + 0.5 *i*i) % num_slot;
unsigned int idx = (unsigned int)((nidx & (num_slot-1)) + 0.5*i + 0.5 *i*i) & (num_slot-1);
unsigned int ret = atomicCAS_block(&idx_tab[idx], EMPTY_SLOT, nidx);
if((ret == EMPTY_SLOT)){
#if DBG_HASH
printf("<%u, %u> HIT(TRY:%d) #m=%u add item:%d nid:%u at %u ret %u (%p) val:%d\n",blockIdx.x, threadIdx.x, retry, num_slot, item, nidx, idx, ret, &idx_tab[idx],val);
#endif
atomicExch_block(&cnt_tab[idx], val);
return ret;
}else if(ret == nidx){
atomicAdd_block(&cnt_tab[idx], val);
return ret;
}
retry++;
#if DBG_HASH
printf("<%u, %u> CONFLICT #m=%u add item:%d nid:%u at %u ret %u (%p)\n",blockIdx.x, threadIdx.x, num_slot, item, nidx, idx, ret, &idx_tab[idx]);
#endif
#endif
}
return HASH_FULL;
}
#define QHASH
__forceinline__ __device__ unsigned int hash_node_idx(const unsigned int nidx, const unsigned int num_slot, unsigned int* __restrict__ idx_tab,
unsigned int* __restrict__ cnt_tab, const unsigned int val, const int item){
#ifdef QHASH
return hash_qprob_insert(nidx, num_slot, idx_tab, cnt_tab, val, item);
#endif
}
__device__ void* block_init_array(unsigned int* __restrict__ base, const unsigned int len, const unsigned int val){
unsigned int tid = threadIdx.x;
for(int i=tid; i<len; i+=blockDim.x){
base[i] = val;
}
}
#ifndef DBG_FPG_ITER
#define DBG_FPG_ITER 0
#endif
#define GLOBAL_TAB_HEADER_SIZE_BYTE (2 * sizeof(unsigned int))
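/*
 * One thread block per frequent item of one conditional table: the block emits
 * the pattern <base pattern + item> when supp >= smin, then walks the gtree
 * upward from the item's hashed nodes to build the item's own conditional
 * table (the new_* fields) for the next iteration.
 */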
__global__ void kernel_fpg_iter_gtree(const void *d_tab_in, void *d_tab_out,
const unsigned long long* __restrict__ tab_in_offset, const unsigned long long* __restrict__ tab_out_offset,
const GTREE_NODE* __restrict__ gtree,
const unsigned int smin, const unsigned int* __restrict__ relatived_bid_in, const unsigned int* __restrict__ wo_remap_raw,
unsigned long long* __restrict__ pat_raw, unsigned int* __restrict__ freq_raw, const int max_item, const unsigned int* __restrict__ d_wide_tab_id){
unsigned int tid = threadIdx.x; //the kernel function should be executed by one block only
unsigned long long* __restrict__ pat_cnt = pat_raw;
unsigned long long* __restrict__ pat = pat_cnt+1;
unsigned int *freq = freq_raw;
//unsigned int bid_offset_idx = find_bid_base_idx(blockIdx.x, relatived_bid_in); //add head
//unsigned int bid_offset_idx = find_bid_base_idx_bsearch(blockIdx.x, relatived_bid_in);
unsigned int bid_offset_idx = d_wide_tab_id[blockIdx.x];
unsigned int bid_offset = relatived_bid_in[bid_offset_idx+1];
// unsigned int res = find_bid_base_idx_bsearch(blockIdx.x, relatived_bid_in);
// assert(bid_offset_idx==res);
const int rel_bid = blockIdx.x - bid_offset;
const unsigned long long* __restrict__ ro = tab_in_offset + 1;
const unsigned long long* __restrict__ wo = tab_out_offset + 1;
const unsigned int* __restrict__ wo_remap = wo_remap_raw + 1;
unsigned int* __restrict__ global_num_fi_out = (unsigned int* )d_tab_out;
unsigned int* __restrict__ global_next_tab_size_out = global_num_fi_out + 1;
const unsigned int* __restrict__ global_num_fi_in = (unsigned int* )d_tab_in;
const unsigned int* __restrict__ global_next_tab_size_in = global_num_fi_in + 1;
unsigned int* __restrict__ tab_out = (unsigned int*)((uintptr_t)d_tab_out + wo[wo_remap[blockIdx.x]]+GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned long long r_offset = ro[bid_offset_idx];
const unsigned int* __restrict__ tab_in = (unsigned int*)((uintptr_t)d_tab_in + r_offset +GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned int* __restrict__ n_fi = tab_in;
const unsigned int num_fi = *n_fi;
const unsigned int* __restrict__ ia_type = n_fi+1;
const unsigned int* __restrict__ ia_size = ia_type+1;
const unsigned int* __restrict__ basepat_idx = ia_size +1;
const unsigned int* __restrict__ items = basepat_idx +1;
const unsigned int* __restrict__ supps = items + num_fi;
const unsigned int* __restrict__ ia_num = supps + num_fi;
const unsigned int* __restrict__ ia_arrays; //= ia_num + n_node;
const unsigned int* __restrict__ node_counts;// = ia_arrays + n_node;
unsigned int item = items[rel_bid];
#if ENABLE_ASSERT
assert(item<max_item);
#endif
//assert(item<5);
unsigned int supp = supps[rel_bid];
unsigned int num_path = ia_num[rel_bid];
unsigned int num_try_path = HT_IARRAY_LEN_PER_ITEM(*ia_size);//round_up_pow2((unsigned)ceil((float)*ia_size / (float)HASH_LOAD_FACTOR));
unsigned int chunk_size = (unsigned)ceil((float)num_try_path/blockDim.x);
unsigned long long pat_idx;
#if 0
if(tid==0)
printf("<%u, %u, %u> item:%u supp:%u\n",blockIdx.x,rel_bid,tid, item,supp);
#endif
if(supp < smin){
return;// all threads of the block return
}
else{
//fill the pattern
if(tid == 0){
pat_idx = atomicAdd(pat_cnt, 1);
int pat_base = pat_idx * *c_num_res_vector;
int sub_idx = item>>6; // DIV 64
if(*basepat_idx == (unsigned int)-1){
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = 1ULL<<(item & 63);
else
pat[pat_base+i] = 0;
}
freq[pat_idx] = supp;
#if 0
printf("<%u, %u, %u> 1 item:%u pat_idx=%lu pat=0x%016llx freq:%u\n", blockIdx.x,rel_bid,tid, item, pat_idx,pat[pat_idx],freq[pat_idx]);
#endif
#if 0
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = 1ULL<<(item & 63);
else
pat[pat_base+i] = 0;
}
#endif
}
else{
for(int i=0;i<*c_num_res_vector;i++){
if(i==sub_idx)
pat[pat_base+i] = pat[*basepat_idx * *c_num_res_vector + i] | 1ULL<<(item & 63);
else
pat[pat_base+i] = pat[*basepat_idx * *c_num_res_vector + i];//copy
}
freq[pat_idx] = supp;
#if 0
printf("<%u, %u, %u> 2 basepat_idx=%u pat[*basepat_idx]=0x%016llx item:%u pat_idx=%lu pat=0x%016llx freq:%u\n",
blockIdx.x,rel_bid,tid, *basepat_idx, pat[*basepat_idx] , item, pat_idx, pat[pat_idx], freq[pat_idx] );
#endif
}
if(item)
atomicAdd(global_num_fi_out, item);
}
}
#if DBG_FPG_ITER
__syncthreads();
#endif
//tab_out_offset[0] --> no next run
if(!item )
return;
if(!num_fi)
return;
if(!tab_out_offset[0])
return;
#if 0
if(tid==0)
printf("<%d, %d, %d> bid_offset:%u d_tab_in:%p tab_in:%p d_tab_out:%p tab_out:%p wo_remap=%u wo(remap)=%u(0x%x)\n",blockIdx.x, rel_bid, tid, bid_offset, d_tab_in,tab_in, d_tab_out, tab_out, wo_remap[blockIdx.x],wo[wo_remap[blockIdx.x]] );
#endif
ia_arrays = ia_num + *n_fi;
node_counts = ia_arrays + HT_IARRAY_LEN(*n_fi, *ia_size);
//for new table
unsigned int* __restrict__ new_n_fi = tab_out;
*new_n_fi = item; //0~ item-1
unsigned int* __restrict__ new_ia_type = new_n_fi+1;
*new_ia_type = IA_SIZE_BASE_ITEM;
unsigned int* __restrict__ new_ia_size = new_ia_type+1;
*new_ia_size = num_path;
unsigned int* __restrict__ new_basepat_idx = new_ia_size +1;
unsigned int* __restrict__ new_items = new_basepat_idx +1;
unsigned int* __restrict__ new_supps = new_items + *new_n_fi;
unsigned int* __restrict__ new_item_ia_num = new_supps + *new_n_fi;
unsigned int* __restrict__ new_item_ia_arrays = new_item_ia_num + *new_n_fi;
unsigned int* __restrict__ new_node_counts = new_item_ia_arrays + HT_IARRAY_LEN(*new_n_fi, *new_ia_size);
unsigned int new_iarray_len_per_item = HT_IARRAY_LEN_PER_ITEM(*new_ia_size);
unsigned int new_iarray_len = HT_IARRAY_LEN(*new_n_fi, *new_ia_size);
//unsigned int strip_size = max((unsigned int)ceilf(((float)new_iarray_len)/blockDim.x),(unsigned int)blockDim.x);
//block_init_array(new_item_ia_arrays, new_iarray_len, strip_size, EMPTY_SLOT);
block_init_array(new_item_ia_arrays, new_iarray_len, EMPTY_SLOT);
// block_init_array(new_node_counts, new_iarray_len, 0);
// if(tid==0)
// memset(new_item_ia_arrays, 0xFF, new_iarray_len*sizeof(int));
if(tid==0)
*new_basepat_idx = pat_idx;
for(int i= tid; i<item; i+=blockDim.x){
new_items[i] = i;
new_supps[i] = 0;
new_item_ia_num[i] = 0;
}
__syncthreads();//necessary if blocksize>32
#if DBG_FPG_ITER
if(tid==0)
printf("P <%u, %u> item:%d num_path:%u\n",blockIdx.x, tid, item, num_path);
#endif
if(tid<min(blockDim.x, num_try_path)){
#if 0
printf("<%u, %u> item:%d supp:%d\n",blockIdx.x, tid, item,supp);
#endif
if(supp<smin){
*new_n_fi = 0;
return;
}
#if 0
printf("<%u, %u> item:%d try path %d ~% d\n",blockIdx.x, tid, item,chunk_size*tid, chunk_size*(tid +1));
#endif
//for(unsigned int path_idx=chunk_size*tid ; (path_idx<chunk_size*(tid +1)) && (path_idx<num_try_path); path_idx++){
for(unsigned int path_idx=tid ; (path_idx<num_try_path); path_idx+=blockDim.x){
unsigned int item_ia_idx;
//get base index in its index array
item_ia_idx = num_try_path * item + path_idx;
unsigned int start_supp = node_counts[item_ia_idx];
unsigned int start_idx = ia_arrays[item_ia_idx];
#if 0
if(start_idx != EMPTY_SLOT)
printf("<b:%u, rb:%u, tid:%u> path_idx:%u(m:%u #p=%u #fp=%u) ia_idx:%u start_idx:%u start_supp:%u item_ia_idx:%u\n",blockIdx.x, rel_bid, tid, path_idx, num_try_path, num_path,*s_num_finished_path,item_ia_idx, start_idx, start_supp, item_ia_idx);
#endif
if(start_idx == EMPTY_SLOT)
continue;//next path
#if ENABLE_ASSERT
assert(start_supp>0);
#endif
const GPU_TREE_NODE *n;
n = &gtree[start_idx];
int pitem = n->pitem;
unsigned int pidx;
// printf("1st pitem=%d\n", pitem);
while(pitem!=ROOT_ITEM){
pidx = c_gtree_item_base[pitem] + n->index;
n = &gtree[pidx];
#if 0
printf("blk:%d(rel_bid:%d) tid:%d idx:%d cur_item=%d pidx=%d\n", blockIdx.x, rel_bid, tid, idx, cur_item,pidx );
#endif
//search array index
unsigned int tmp_item_ia_base = new_iarray_len_per_item * pitem ;//for filling new table's IA
// printf("<%d> base_vec:0x%016llx pathid:%d cur_item:%d tmp_item_ia_base:%d \n", tid, base_vec,path_idx,cur_item,tmp_item_ia_base);
//assert(cur_item< item);
atomicAdd_block(&new_supps[pitem], start_supp);
//hash nodes to assigned item
unsigned int hash_ret;
hash_ret = hash_node_idx(pidx, new_iarray_len_per_item, new_item_ia_arrays + tmp_item_ia_base, new_node_counts+tmp_item_ia_base, start_supp,pitem );
if(hash_ret == EMPTY_SLOT){
#if 1
// printf("blk:%d(rel_bid:%d) tid:%d item:%u @%p ++\n",blockIdx.x, rel_bid, tid, cur_item, &new_item_ia_num[cur_item]);
#endif
atomicAdd_block(&new_item_ia_num[pitem],1);
}
else
assert(hash_ret!=HASH_FULL);
pitem = n->pitem;
}
}
}
//__syncthreads();
//global_next_tab_size
#if 0
if(tid < *n_fi){
unsigned int subtab_size = gpu_cal_next_tab_size(items, ia_num);
atomicAdd(global_next_tab_size, subtab_size);
}
if(tid==0 && blockIdx.x==0)
atomicAdd(global_next_tab_size, 2*sizeof(unsigned int)); //total #FI + next_tab_size
#endif
}
/*
* chess support 30%
* #define MAX_RO_SIZE (50<<20)
* #define MAX_WO_SIZE (50<<20)
* #define MAX_IO_SIZE (50<<20)
* #define MAX_PAT_SIZE (200<<20)
* #define MAX_FREQ_SIZE (100<<20)
* #define MAX_REMAP (5000000-1)
*
* */
#define MAX_RO_SIZE (50<<20)
#define MAX_WO_SIZE (50<<20)
#define MAX_IO_SIZE (50<<20)
#define MAX_PAT_SIZE (200<<20)
#define MAX_FREQ_SIZE (100<<20)
#define MAX_REMAP (1000000-1)
#define MAX_REMAP_SIZE ((MAX_REMAP+1)*sizeof(unsigned int))
#ifndef DBG_CAL_OFFSET
#define DBG_CAL_OFFSET 0
#endif
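/*
 * One thread per (table, frequent item) pair: if the item is non-zero and its
 * support reaches msup, reserve a slot via remap and record the byte size
 * (new_wo) and item count (new_io) of the conditional table it will produce,
 * so the host can prefix-sum them into the write/index offsets of the next
 * iteration.
 */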
__global__ void kernel_cal_offset(const unsigned int* __restrict__ tab_in,
unsigned int* __restrict__ pre_idx_offset_raw, unsigned int* __restrict__ new_idx_offset_raw,
unsigned long long* __restrict__ pre_wo_raw, unsigned long long* __restrict__ new_wo_raw,
unsigned int* __restrict__ remap_raw, const unsigned int msup, const unsigned max_item, unsigned int* __restrict__ d_wide_tab_id){ //, unsigned int* __restrict__ new_ro_raw){
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; // a thread = a block
unsigned int *remap_size = remap_raw;
unsigned int *remap = remap_size + 1;
assert(*remap_size<MAX_REMAP);
unsigned int* __restrict__ idx_offset_size = pre_idx_offset_raw;
const unsigned int* __restrict__ idx_offset= idx_offset_size + 1;
const unsigned long long* __restrict__ pre_wo_offset= pre_wo_raw + 1;
const unsigned int* __restrict__ total_num_fi = tab_in;
const unsigned int* __restrict__ next_tab_size = total_num_fi+1;
unsigned long long* __restrict__ new_wo_size = new_wo_raw;
unsigned long long* __restrict__ new_wo = new_wo_size+1;
unsigned int* __restrict__ new_io_size = new_idx_offset_raw;
unsigned int* __restrict__ new_io = new_io_size+1;
const unsigned int* __restrict__ tab;
if(tid < *total_num_fi){
int tab_idx = find_bid_base_idx_bsearch(tid, pre_idx_offset_raw);
d_wide_tab_id[tid] = tab_idx;
#if DBG_CAL_OFFSET
printf("<%u> tab_idx=%u wo=%llu\n", tid,tab_idx,pre_wo_offset[tab_idx]);
#endif
//new_ro[tid] = pre_wo_offset[tab_idx];
tab = (unsigned int*)((uintptr_t)tab_in + pre_wo_offset[tab_idx] + GLOBAL_TAB_HEADER_SIZE_BYTE);
const unsigned int* __restrict__ n_fi = tab;
const unsigned int num_fi = *n_fi;
const unsigned int* __restrict__ ia_type = n_fi+1;
const unsigned int* __restrict__ ia_size = ia_type+1;
const unsigned int* __restrict__ basepat_idx = ia_size +1;
const unsigned int* __restrict__ items = basepat_idx +1;
const unsigned int* __restrict__ supps = items + num_fi;
const unsigned int* __restrict__ ia_num = supps + num_fi;
// const unsigned int* __restrict__ ia_arrays; //= ia_num + n_node;
// const unsigned int* __restrict__ node_counts;// = ia_arrays + n_node;
unsigned int rel_tid = tid -idx_offset[tab_idx];
unsigned int item = items[rel_tid];
unsigned int supp = supps[rel_tid];
#if DBG_CAL_OFFSET
printf("<%u, rel_tid:%u> item:%u supp:%d\n",tid,rel_tid, item, supp);
#endif
assert(item<max_item);
if(item && (supp >= msup)){
//unsigned int num_path = ia_num[rel_tid];
//unsigned int num_path = *ia_size;
unsigned int num_path;
if(*ia_type==IA_SIZE_EACH_ITEMS)
num_path = ia_num[rel_tid];
else
num_path = *ia_size;
remap[tid] = atomicAdd(remap_size, 1);
//#if DBG_CAL_OFFSET
// printf("<%u, rel_tid:%u> item:%u remap=%u\n",tid,rel_tid, item,remap[tid]);
//#endif
//*new_wo_size = *total_num_fi;
new_wo[remap[tid]] = HT_SIZE(item, num_path);
#if DBG_CAL_OFFSET
printf("<%u, rel_tid:%u> item:%u num_path=%u new_wo[%u]=%llu(0x%X) /%llu MB \n",tid,rel_tid, item,num_path,remap[tid],new_wo[remap[tid]],new_wo[remap[tid]],new_wo[remap[tid]]>>20);
#endif
//*new_io_size = *total_num_fi;
new_io[remap[tid]] = item;
// *new_ro_size = *total_num_fi;
atomicAdd(new_wo_size, 1);
atomicAdd(new_io_size, 1);
}
}
}
//int max_node_item = 0;
#define DEBUG_FINAL_GET_TAB_SIZE 0
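/* Reduces the per-table sizes d_tmp_wo_raw[1..num] with cub::DeviceReduce::Sum,
 * staging the total in d_tmp_wo_raw[0] before copying it back into *res. */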
__host__ void get_num_next_tabs(unsigned long long *d_tmp_wo_raw, unsigned num, unsigned long long *res){
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
//unsigned long long *d_total;
//cudaMalloc(&d_total, sizeof(unsigned long long));
#if DEBUG_FINAL_GET_TAB_SIZE
unsigned long long *wobuf_tmp = (unsigned long long*) malloc(MAX_WO_SIZE);
CUDA_CHECK_RETURN(cudaMemcpy(wobuf_tmp, d_tmp_wo_raw, MAX_WO_SIZE, cudaMemcpyDeviceToHost));//copy the context with counter
#endif
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp_wo_raw+1, d_tmp_wo_raw, num);
// Allocate temporary storage
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// Run sum-reduction
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_tmp_wo_raw+1, d_tmp_wo_raw, num);
#if DEBUG_FINAL_GET_TAB_SIZE
CUDA_CHECK_RETURN(cudaMemcpy(wobuf_tmp, d_tmp_wo_raw, MAX_WO_SIZE, cudaMemcpyDeviceToHost));//copy the context with counter
#endif
// the first element of d_tmp_wo_raw is borrowed to hold the sum, i.e. the size of the new tab
CUDA_CHECK_RETURN(cudaMemcpy(res, d_tmp_wo_raw, sizeof(unsigned long long), cudaMemcpyDeviceToHost));
}
#define DEBUG_FINAL_WO 0
__host__ void final_next_write_offset(unsigned long long *d_dst, unsigned long long *d_src, int num)
{
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
#if DEBUG_FINAL_WO
unsigned long long *wobuf_tmp = (unsigned long long*) malloc(MAX_WO_SIZE);
CUDA_CHECK_RETURN(cudaMemcpy(wobuf_tmp, d_dst-1, MAX_WO_SIZE, cudaMemcpyDeviceToHost));//copy the context with counter
#endif
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
// Allocate temporary storage
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// Run exclusive prefix sum
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
#if DEBUG_FINAL_WO
CUDA_CHECK_RETURN(cudaMemcpy(wobuf_tmp, d_dst-1, MAX_WO_SIZE, cudaMemcpyDeviceToHost));//copy the context with counter
#endif
}
__host__ void fianl_next_index_offset(unsigned int *d_dst, unsigned int *d_src, int num)
{
//prefix sum for IO
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
// Allocate temporary storage
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// Run exclusive prefix sum
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_src, d_dst, num);
#if 0
unsigned int *iobuf_tmp = (unsigned int*) malloc(MAX_IO_SIZE);
CUDA_CHECK_RETURN(cudaMemcpy(iobuf_tmp, d_dst-1, MAX_IO_SIZE, cudaMemcpyDeviceToHost));//copy the context with counter
#endif
}
__host__ long cuda_main(CSTREE *cst, SUPP smin)
{
int num_fpnode = cst->num_fpnode;
int num_fi = cst->cnt;
cst->real_max_depth += 1;
printf("worst max_depth =%d real max_dekpth=%d\n",cst->max_depth,cst->real_max_depth);
alloc_gpu_cltree(cst);
CUDA_CHECK_RETURN(cudaMemcpy(cst->d_gtree_itembase, cst->h_gtree_itembase,cst->gtree_size, cudaMemcpyHostToDevice));
assert(MAX_DEPTH > cst->real_max_depth);
//CUDA_CHECK_RETURN(cudaMemcpyToSymbol (d_level_clen, cst->level_clen, (cst->real_max_depth) *sizeof(unsigned int )));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol (c_msup, &smin, sizeof(unsigned int)));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol (c_num_fi, &cst->cnt, sizeof(unsigned int )));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol (c_num_fpnode, &cst->num_fpnode, sizeof(unsigned int )));
unsigned int num_res_vector = ((cst->cnt + 63) & ~63) >> 6;
printf("cst->cnt:%d num_res_vector=%u\n", cst->cnt, num_res_vector);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol (c_num_res_vector, &num_res_vector, sizeof(unsigned int )));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol (c_gtree_item_base, cst->h_gtree_itembase, cst->cnt*sizeof(unsigned int )));
void *global_htab_buf;
void *d_global_htab_buf;
int max_num_node = 0;
for(int i=0; i< cst->cnt;i++){
if(cst->heads[i].cnt > max_num_node)
max_num_node = cst->heads[i].cnt;
}
unsigned int tab_size = HT_SIZE(cst->cnt, max_num_node) + 2 * sizeof(unsigned int);
CUDA_CHECK_RETURN(cudaMallocHost((void**) &global_htab_buf, tab_size));
memset(global_htab_buf, 0, tab_size);
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_global_htab_buf, tab_size));
CUDA_CHECK_RETURN(cudaMemset(d_global_htab_buf, 0, tab_size));
printf("global htb %p ~ %p\n", global_htab_buf, global_htab_buf+ tab_size);
printf("d_global htb %p ~ %p\n", d_global_htab_buf, (unsigned long long)d_global_htab_buf+ tab_size);
unsigned int *n_global_fi = (unsigned int*) global_htab_buf;
*n_global_fi = cst->cnt;
unsigned int *next_table_size = n_global_fi + 1;
unsigned int *n_fi = next_table_size+1;
unsigned int *type_ia_size = n_fi+1;
unsigned int *ia_size = type_ia_size+1;
unsigned int *basepat_idx = ia_size+1;
*basepat_idx= -1; //means NULL
unsigned int *items = basepat_idx+1;
unsigned int *supps = items + cst->cnt;
unsigned int *ia_num = supps + cst->cnt;
unsigned int *ia_arrays = ia_num + cst->cnt;
memset(ia_arrays, -1, HT_IARRAY_LEN(cst->cnt, max_num_node)*sizeof(unsigned));
unsigned int *node_counts = ia_arrays + HT_IARRAY_LEN(cst->cnt, max_num_node);
*type_ia_size = IA_SIZE_EACH_ITEMS;
*n_fi = cst->cnt;
*ia_size = max_num_node;
//fill 1st htb
for(int i=0;i< cst->cnt;i++){
static unsigned int pre_node = 0;
items[i]=i;
supps[i]=cst->heads[i].supp;
ia_num[i] = cst->heads[i].cnt;
for(int j=0;j<cst->heads[i].cnt; j++){
ia_arrays[pre_node+j] = cst->h_gtree_itembase[i]+j; //index in the gtree
node_counts[pre_node+j] = cst->h_gtree[cst->h_gtree_itembase[i]+j].freq;
}
pre_node += HT_IARRAY_LEN_PER_ITEM(max_num_node);
}
CUDA_CHECK_RETURN(cudaMemcpy(d_global_htab_buf, global_htab_buf, tab_size, cudaMemcpyHostToDevice));
unsigned int *relatived_id_in, *d_relatived_id_in;
unsigned long long next_tab_size;
void *d_tab_in, *d_tab_out;
unsigned long long *d_tab_in_offset, *d_tab_out_offset;
unsigned int *tab_out;
d_tab_in = d_global_htab_buf;
unsigned int num_result_entry = 0;
unsigned int *global_num_fi;
CUDA_CHECK_RETURN(cudaMallocHost((void**) &global_num_fi, sizeof(unsigned int)));
void *buf_pool;
unsigned int total_buf_size = (MAX_WO_SIZE+MAX_IO_SIZE)*2 + MAX_RO_SIZE + MAX_REMAP_SIZE;
CUDA_CHECK_RETURN(cudaMalloc((void**) &buf_pool, total_buf_size));
unsigned int *d_idx_offset_raw, *d_tmp_idx_offset_raw, *d_remap_raw, *d_ro_raw;
unsigned long long *d_wo_raw, *d_tmp_wo_raw;
d_idx_offset_raw = (unsigned int*)buf_pool;
d_tmp_idx_offset_raw = d_idx_offset_raw + MAX_IO_SIZE/sizeof(unsigned int);
d_wo_raw = (unsigned long long*)(d_tmp_idx_offset_raw + MAX_IO_SIZE/sizeof(unsigned long long));
d_tmp_wo_raw = (unsigned long long*)(d_wo_raw + MAX_WO_SIZE/sizeof(unsigned long long));
d_remap_raw = (unsigned int*)(d_tmp_wo_raw + MAX_WO_SIZE/sizeof(unsigned long long));
d_ro_raw = d_remap_raw + MAX_REMAP_SIZE/sizeof(unsigned long long);
//initial idx_offset
unsigned int *init_idx_offset = (unsigned int*) malloc(MAX_IO_SIZE);
init_idx_offset[0] = 1;
init_idx_offset[1] = 0;
unsigned long long *init_wo_raw = (unsigned long long*) malloc(MAX_IO_SIZE);
init_wo_raw[0]=1;
init_wo_raw[1] = 0; //kernel will consider the global header
CUDA_CHECK_RETURN(cudaMemcpy(d_idx_offset_raw, init_idx_offset, 2* sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_wo_raw, init_wo_raw, 2 * sizeof(unsigned long long), cudaMemcpyHostToDevice));
d_tab_in_offset = d_wo_raw;
d_tab_out_offset = d_tmp_wo_raw;
unsigned int *d_bid_offset_raw = d_idx_offset_raw;
unsigned int *d_bid_offset_next_raw = d_tmp_idx_offset_raw;
unsigned int *d_write_offset_remap = d_remap_raw;
*global_num_fi = cst->cnt;
void *d_res;
unsigned long long *d_pat;
unsigned int *d_freq;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_res, MAX_PAT_SIZE+ MAX_FREQ_SIZE));
d_pat = (unsigned long long*)d_res;
d_freq = (unsigned int*)((uintptr_t)d_pat + MAX_PAT_SIZE);
CUDA_CHECK_RETURN(cudaMemset(d_pat, 0, sizeof(unsigned long long)));
int num_wo;
int k=1;
size_t old_global_num_fi = 0, new_global_num_fi = *global_num_fi;
size_t old_tab_in_size = 0;
void *d_old_tab_in=0;
unsigned int *d_wide_tab_id = 0; //declared outside the loop so the buffer survives iterations and can actually be reused
do{
printf("== %d-item set==\n",k++);
printf("kernel_cal_offset\n");
if(new_global_num_fi > old_global_num_fi){
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_wide_tab_id, new_global_num_fi * sizeof(unsigned int)));
//printf("new d_wide_tab_id:%llu\n",new_global_num_fi);
old_global_num_fi = new_global_num_fi;
}else{
//printf("reuse d_wide_tab_id:%llu\n",old_global_num_fi);
}
CUDA_CHECK_RETURN(cudaMemset(d_write_offset_remap, 0, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(cudaMemset(d_bid_offset_next_raw, 0, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(cudaMemset(d_tab_out_offset, 0, sizeof(unsigned int)));
kernel_cal_offset<<<ceil(new_global_num_fi/128.0),128>>>((unsigned int*)d_tab_in,
d_bid_offset_raw, d_bid_offset_next_raw,
d_tab_in_offset, d_tab_out_offset,
d_write_offset_remap, smin, cst->cnt, d_wide_tab_id);
CUDA_CHECK_RETURN(cudaMemcpy(&num_wo, d_tab_out_offset, sizeof(unsigned int), cudaMemcpyDeviceToHost));
//printf("#tab in next run : %u(size:%uKB)\n",num_wo, (num_wo*sizeof(unsigned long long))>>10);
get_num_next_tabs(d_tab_out_offset, num_wo, &next_tab_size);
printf("next_tab_size in next run : %lluMB\n",next_tab_size>>20);
if(num_wo){
//final_next_write_offset(d_wo_raw+1, d_tmp_wo_raw+1, num_wo);
final_next_write_offset(d_tab_out_offset+1, d_tab_out_offset+1, num_wo);
//get_num_next_tabs(d_tmp_wo_raw, num_wo, &next_tab_size);
next_tab_size += sizeof(unsigned int)*2;
// count size of the next table and fill the tab_out_offset
//CUDA_CHECK_RETURN(cudaMallocHost((void**) &tab_out, next_tab_size));
if(next_tab_size>old_tab_in_size){
if(d_old_tab_in){
//printf("free d_old_tab_in:%p\n",d_old_tab_in);
CUDA_CHECK_RETURN(cudaFree(d_old_tab_in));
}
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_tab_out, next_tab_size));
// printf("d_tab_in:0x%p new d_tab_out:%p(%lluMB)\n",d_tab_in, d_tab_out, next_tab_size>>20);
}else{
d_tab_out = d_old_tab_in;
// printf("d_tab_in:0x%p reuse d_tab_out = d_old_tab_in:%p(%lluMB)\n",d_tab_in, d_tab_out, old_tab_in_size>>20);
}
// printf("num_wo=%u next_tab_size=%u(%p~%p)\n",num_wo, next_tab_size, d_tab_out, (uintptr_t)d_tab_out + next_tab_size);
//CUDA_CHECK_RETURN(cudaMemset(d_tab_out, 0, next_tab_size));
CUDA_CHECK_RETURN(cudaMemset(d_tab_out, 0, 8));// clear only the global counters; each block initializes its own table region by itself
}
printf("kernel_fpg_iter\n");
kernel_fpg_iter_gtree<<<new_global_num_fi,512>>>(d_tab_in,d_tab_out,d_tab_in_offset,d_tab_out_offset,
cst->d_gtree, smin, d_bid_offset_raw, d_write_offset_remap, d_pat, d_freq, cst->cnt, d_wide_tab_id);
//printf("%s\n",cudaGetErrorString(cudaGetLastError()));
if(!num_wo)
break;
// CUDA_CHECK_RETURN(cudaMemcpy(tab_out, d_tab_out, next_tab_size, cudaMemcpyDeviceToHost));//for debug
CUDA_CHECK_RETURN(cudaMemcpy(global_num_fi, d_tab_out, sizeof(unsigned int), cudaMemcpyDeviceToHost));
printf("global_num_fi=%u\n",*global_num_fi);
new_global_num_fi = *global_num_fi;
void *d_ptmp;
//swap input and output tab
// CUDA_CHECK_RETURN(cudaFree(d_tab_in));
if(new_global_num_fi> old_global_num_fi)
CUDA_CHECK_RETURN(cudaFree(d_wide_tab_id));
d_old_tab_in = d_tab_in;
old_tab_in_size = tab_size;
d_tab_in = d_tab_out;
tab_size = next_tab_size;
fianl_next_index_offset(d_bid_offset_next_raw+1,d_bid_offset_next_raw+1,num_wo);
d_ptmp = d_bid_offset_raw;
d_bid_offset_raw = d_bid_offset_next_raw;
d_bid_offset_next_raw = (unsigned int*)d_ptmp;
//swap WO buf
d_ptmp = d_tab_in_offset;
d_tab_in_offset = d_tab_out_offset;
d_tab_out_offset = (unsigned long long*)d_ptmp;
}while(num_wo);
/*
unsigned int *tab_out;
CUDA_CHECK_RETURN(cudaMallocHost((void**) &tab_out, next_tab_size));
CUDA_CHECK_RETURN(cudaMemcpy(tab_out, d_tab_out, next_tab_size, cudaMemcpyDeviceToHost));
*/
//cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMallocHost((void**) &h_res, sizeof(unsigned long long)));
CUDA_CHECK_RETURN(cudaMemcpy(h_res, d_res, sizeof(unsigned long long), cudaMemcpyDeviceToHost));
unsigned long long *h_pat = (unsigned long long*)h_res;
printf("CUDA #pat = %llu\n",h_pat[0]);
return (long) h_res;
}
void free_gpu_mem()
{
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
0101c5f76a0ecd654949e86f75fe6d9398c549e6.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
const char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
void chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
| 0101c5f76a0ecd654949e86f75fe6d9398c549e6.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
const char chebyshev_polynomial_v_name[] = "chebyshev_polynomial_v_forward";
void chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(chebyshev_polynomial_v_stub, &chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
|
b1ccc0d04b2f29f3ad041f9cc05a012f85f7bc9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
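// Auto-generated kernel (the naming suggests a TVM-style code generator): it
// accumulates Output as a contraction of Data with the factor matrices K0, K1
// and K2 over the rank index rr and the reduction axes rs0/rs1/rs2, staging
// tiles of every operand in shared memory between the reduction steps.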
__global__ void default_function_kernel0(const float* __restrict__ Data,
const float* __restrict__ K0,
const float* __restrict__ K1,
const float* __restrict__ K2,
float* __restrict__ Output) {
float Output_local[1];
__shared__ float Data_shared[128];
__shared__ float K0_shared[32];
__shared__ float K1_shared[64];
__shared__ float K2_shared[1];
Output_local[0] = 0.000000e+00f;
for (int rr_outer = 0; rr_outer < 137; ++rr_outer) {
for (int rs0_outer = 0; rs0_outer < 2; ++rs0_outer) {
for (int rs2_outer = 0; rs2_outer < 16; ++rs2_outer) {
__syncthreads();
for (int ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner < 8; ++ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) {
Data_shared[(((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner)] = Data[((((((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) / 128) * 4096) + (rs0_outer * 2048)) + (((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) % 128) * 16)) + rs2_outer)];
}
for (int ax0_ax1_ax2_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_fused_fused_inner_inner_inner < 2; ++ax0_ax1_ax2_fused_fused_inner_inner_inner) {
K0_shared[(((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + ax0_ax1_ax2_fused_fused_inner_inner_inner)] = K0[(((((rs0_outer * 4384) + (((int)threadIdx.z) * 1096)) + (((int)threadIdx.y) * 274)) + (ax0_ax1_ax2_fused_fused_inner_inner_inner * 137)) + rr_outer)];
}
for (int ax0_ax1_ax2_fused_fused_inner_inner_inner1 = 0; ax0_ax1_ax2_fused_fused_inner_inner_inner1 < 4; ++ax0_ax1_ax2_fused_fused_inner_inner_inner1) {
K1_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + ax0_ax1_ax2_fused_fused_inner_inner_inner1)] = K1[((((((int)threadIdx.z) * 2192) + (((int)threadIdx.y) * 548)) + (ax0_ax1_ax2_fused_fused_inner_inner_inner1 * 137)) + rr_outer)];
}
if (((int)threadIdx.y) < (1 - ((int)threadIdx.z))) {
if (((int)threadIdx.y) < 1) {
if (((int)threadIdx.y) < ((16 - rs2_outer) - ((int)threadIdx.z))) {
K2_shared[(((int)threadIdx.y) + ((int)threadIdx.z))] = K2[(((((((int)threadIdx.y) * 548) + (((int)threadIdx.z) * 548)) + (rs2_outer * 548)) + (((int)blockIdx.x) * 137)) + rr_outer)];
}
}
}
__syncthreads();
for (int rs0_inner = 0; rs0_inner < 8; ++rs0_inner) {
for (int rs1_inner = 0; rs1_inner < 16; ++rs1_inner) {
Output_local[0] = (Output_local[0] + (((Data_shared[((rs0_inner * 16) + rs1_inner)] * K0_shared[((rs0_inner * 4) + ((int)threadIdx.z))]) * K1_shared[((rs1_inner * 4) + ((int)threadIdx.y))]) * K2_shared[0]));
}
}
}
}
}
Output[(((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + ((int)blockIdx.x))] = Output_local[0];
}
void DenseCpKernelLauncher(const float* U, const float* K0,
const float* K1, const float* KC, float* V){
dim3 gridDim0(4, 1, 1);
dim3 blockDim0(1, 4, 4);
hipLaunchKernelGGL(( default_function_kernel0), dim3(gridDim0), dim3(blockDim0), 0, 0, U, K0, K1, KC, V);
hipDeviceSynchronize();
}
#endif
| b1ccc0d04b2f29f3ad041f9cc05a012f85f7bc9d.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
__global__ void default_function_kernel0(const float* __restrict__ Data,
const float* __restrict__ K0,
const float* __restrict__ K1,
const float* __restrict__ K2,
float* __restrict__ Output) {
float Output_local[1];
__shared__ float Data_shared[128];
__shared__ float K0_shared[32];
__shared__ float K1_shared[64];
__shared__ float K2_shared[1];
Output_local[0] = 0.000000e+00f;
for (int rr_outer = 0; rr_outer < 137; ++rr_outer) {
for (int rs0_outer = 0; rs0_outer < 2; ++rs0_outer) {
for (int rs2_outer = 0; rs2_outer < 16; ++rs2_outer) {
__syncthreads();
for (int ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner < 8; ++ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) {
Data_shared[(((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner)] = Data[((((((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) / 128) * 4096) + (rs0_outer * 2048)) + (((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) % 128) * 16)) + rs2_outer)];
}
for (int ax0_ax1_ax2_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_fused_fused_inner_inner_inner < 2; ++ax0_ax1_ax2_fused_fused_inner_inner_inner) {
K0_shared[(((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + ax0_ax1_ax2_fused_fused_inner_inner_inner)] = K0[(((((rs0_outer * 4384) + (((int)threadIdx.z) * 1096)) + (((int)threadIdx.y) * 274)) + (ax0_ax1_ax2_fused_fused_inner_inner_inner * 137)) + rr_outer)];
}
for (int ax0_ax1_ax2_fused_fused_inner_inner_inner1 = 0; ax0_ax1_ax2_fused_fused_inner_inner_inner1 < 4; ++ax0_ax1_ax2_fused_fused_inner_inner_inner1) {
K1_shared[(((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + ax0_ax1_ax2_fused_fused_inner_inner_inner1)] = K1[((((((int)threadIdx.z) * 2192) + (((int)threadIdx.y) * 548)) + (ax0_ax1_ax2_fused_fused_inner_inner_inner1 * 137)) + rr_outer)];
}
if (((int)threadIdx.y) < (1 - ((int)threadIdx.z))) {
if (((int)threadIdx.y) < 1) {
if (((int)threadIdx.y) < ((16 - rs2_outer) - ((int)threadIdx.z))) {
K2_shared[(((int)threadIdx.y) + ((int)threadIdx.z))] = K2[(((((((int)threadIdx.y) * 548) + (((int)threadIdx.z) * 548)) + (rs2_outer * 548)) + (((int)blockIdx.x) * 137)) + rr_outer)];
}
}
}
__syncthreads();
for (int rs0_inner = 0; rs0_inner < 8; ++rs0_inner) {
for (int rs1_inner = 0; rs1_inner < 16; ++rs1_inner) {
Output_local[0] = (Output_local[0] + (((Data_shared[((rs0_inner * 16) + rs1_inner)] * K0_shared[((rs0_inner * 4) + ((int)threadIdx.z))]) * K1_shared[((rs1_inner * 4) + ((int)threadIdx.y))]) * K2_shared[0]));
}
}
}
}
}
Output[(((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 4)) + ((int)blockIdx.x))] = Output_local[0];
}
void DenseCpKernelLauncher(const float* U, const float* K0,
const float* K1, const float* KC, float* V){
dim3 gridDim0(4, 1, 1);
dim3 blockDim0(1, 4, 4);
default_function_kernel0<<<gridDim0, blockDim0>>>(U, K0, K1, KC, V);
cudaDeviceSynchronize();
}
#endif
|
4b89b3ed98029f1c2cca24eb353a5eb5362f233e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
extern __shared__ double3 pCache[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double3 myP = idx < N ? p[idx] : double3{0.0, 0.0, 0.0};
double3 ftot{0.0, 0.0, 0.0};
for (int offset = 0; offset < N; offset += blockDim.x) {
// Copy to shared memory. Be careful not to exceed the total number of bodies.
int blockSize = min((int)blockDim.x, N - offset);
if (threadIdx.x < blockSize)
pCache[threadIdx.x] = p[offset + threadIdx.x];
// Wait till all threads are done preparing pCache. Even though warps
// are synchronized (at least on the architecture that Piz Daint has),
// different warps are not.
__syncthreads();
// Compute. Again, be careful not to exceed the total number of bodies N.
// (i goes from 0 to blockSize-1, not to blockDim.x-1).
for (int i = 0; i < blockSize; ++i) {
double dx = pCache[i].x - myP.x;
double dy = pCache[i].y - myP.y;
double dz = pCache[i].z - myP.z;
double inv_r = rsqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
// Synchronize again, otherwise one warp may start overwriting pCache
// in the next step too early.
__syncthreads();
}
if (idx < N)
    f[idx] = ftot; // guard the store as well: the last block may contain threads with idx >= N
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
size_t sharedMemorySize = numThreads * sizeof(double3);
hipLaunchKernelGGL(( computeForcesKernel), dim3(numBlocks), dim3(numThreads), sharedMemorySize, 0, N, p, f);
}
| 4b89b3ed98029f1c2cca24eb353a5eb5362f233e.cu | #include <cuda_runtime.h>
__global__ void computeForcesKernel(int N, const double3 *p, double3 *f) {
extern __shared__ double3 pCache[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double3 myP = idx < N ? p[idx] : double3{0.0, 0.0, 0.0};
double3 ftot{0.0, 0.0, 0.0};
for (int offset = 0; offset < N; offset += blockDim.x) {
// Copy to shared memory. Be careful not to exceed the total number of bodies.
int blockSize = min((int)blockDim.x, N - offset);
if (threadIdx.x < blockSize)
pCache[threadIdx.x] = p[offset + threadIdx.x];
// Wait till all threads are done preparing pCache. Even though warps
// are synchronized (at least on the architecture that Piz Daint has),
// different warps are not.
__syncthreads();
// Compute. Again, be careful not to exceed the total number of bodies N.
// (i goes from 0 to blockSize-1, not to blockDim.x-1).
for (int i = 0; i < blockSize; ++i) {
double dx = pCache[i].x - myP.x;
double dy = pCache[i].y - myP.y;
double dz = pCache[i].z - myP.z;
double inv_r = rsqrt(1e-150 + dx * dx + dy * dy + dz * dz);
double inv_rrr = inv_r * inv_r * inv_r;
ftot.x += dx * inv_rrr;
ftot.y += dy * inv_rrr;
ftot.z += dz * inv_rrr;
}
// Synchronize again, otherwise one warp may start overwriting pCache
// in the next step too early.
__syncthreads();
}
if (idx < N)
    f[idx] = ftot; // guard the store as well: the last block may contain threads with idx >= N
}
void computeForces(int N, const double3 *p, double3 *f) {
constexpr int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
size_t sharedMemorySize = numThreads * sizeof(double3);
computeForcesKernel<<<numBlocks, numThreads, sharedMemorySize>>>(N, p, f);
}
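
// ---------------------------------------------------------------------------
// Hedged usage sketch (illustration only, not part of the original program).
// It shows one plausible way to drive computeForces(): allocate device buffers
// for N bodies, copy the positions over, compute the forces and copy them back.
// The function name and the host-side handling are assumptions, and error
// checking is omitted for brevity.
// ---------------------------------------------------------------------------
void exampleComputeForces(int N, const double3 *hostPositions, double3 *hostForces) {
    double3 *d_p = nullptr, *d_f = nullptr;
    cudaMalloc((void **)&d_p, N * sizeof(double3));   // positions on the device
    cudaMalloc((void **)&d_f, N * sizeof(double3));   // forces on the device
    cudaMemcpy(d_p, hostPositions, N * sizeof(double3), cudaMemcpyHostToDevice);
    computeForces(N, d_p, d_f);                       // launches the tiled kernel above
    cudaMemcpy(hostForces, d_f, N * sizeof(double3), cudaMemcpyDeviceToHost); // D2H copy synchronizes
    cudaFree(d_p);
    cudaFree(d_f);
}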
|
fa188a71580de9ba32bae1991053502f3ae3513a.hip | // !!! This is a file automatically generated by hipify!!!
#include "histdupe.h"
#include <stdio.h>
#include <string>
#include <iostream>
#include <vector>
#include <chrono>
/*
Utility method for launching a CUDA kernel. Performes all the nasty checks, allocation, and copying of data.
*/
hipError_t findDupes(const float*, const float*, const float*, const float*, const int*, const int*, std::vector<Pair>&, int*, const int, const int, const int);
int main(int argc, char* argv[]) {
if (argc < 2) {
fprintf(stderr, "Too few arguments. Expected 1\n\nUsage: %s DATA_PATH\n", argv[0]);
return 1;
}
// Initialize variables
// Maximum number of results to return. Only applies to CUDA launches
int max_results = 1000000;
// Base confidence value for similar pairs
float confidence = 0.95f;
// Maximum color variance per histogram. Used to determine if an image is black-and-white or colorful
float color_variance = 0.25f;
// Number of histograms in the dataset
int N = 50000;
// N subset
int subN = 25000;
// Use CUDA to find similar pairs
bool cuda = true;
// Clock used for timing
std::chrono::steady_clock::time_point time;
// Print some diagnostics
std::cout << "Datafile Path: " << argv[1] << std::endl;
std::cout << "N: " << N << std::endl;
std::cout << "Max Results: " << max_results << std::endl;
std::cout << "Confidence: " << confidence << std::endl;
std::cout << "Color Variance: " << color_variance << std::endl;
// Allocate some arrays
std::cout << "Allocating memory..." << std::endl;
time = std::chrono::steady_clock::now();
int* ids1 = new int[subN]; // Mapping of actual index to ID of histogram
int* ids2 = new int[N];
float* data1 = new float[128 * subN];
float* data2 = new float[128 * N]; // End-to-end array of all histograms. Each histogram consists of 128 floats
float* conf1 = new float[subN];
float* conf2 = new float[N]; // Confidence array; allows using stricter confidence for black and white images
std::vector<Pair> pairs; // Vector of similar pairs (to be populated)
std::cout << "Allocated memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Read test data from file
std::cout << "Reading data from file: " << argv[1] << "..." << std::endl;
time = std::chrono::steady_clock::now();
FILE* file; // Data file
file = fopen(argv[1], "r"); // Open data file to read
for (int i = 0; i < N; i++) {
fscanf(file, "%d", &ids2[i]); // Read first int as id of histogram
for (int j = 0; j < 128; j++) { // Read 128 floats as histogram elements
fscanf(file, "%f", &data2[i * 128 + j]);
}
}
fclose(file); // Close data file
// Copy data and ids for subset
for (int i = 0; i < subN; i++) {
ids1[i] = ids2[i];
for (int j = 0; j < 128; j++) {
data1[i * 128 + j] = data2[i * 128 + j];
}
}
std::cout << "Read data in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Build confidence array
std::cout << "Building confidence array..." << std::endl;
time = std::chrono::steady_clock::now();
float confidence_square = 1 - (1 - confidence) * (1 - confidence); // Squared confidence for comparing black and white images.
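// e.g. confidence = 0.95 gives confidence_square = 1 - 0.05 * 0.05 = 0.9975 (a stricter threshold)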
// Generate confidence array
for (int i = 0; i < N; i++) {
float d = 0;
// Compute sum of color variance across histogram
for (int k = 0; k < 32; k++) {
// Ignore alpha values (first 32 floats)
float r = data2[i * 128 + k + 32];
float g = data2[i * 128 + k + 64];
float b = data2[i * 128 + k + 96];
d += __max(__max(r, g), b) - __min(__min(r, g), b);
}
if (d > color_variance) {
conf2[i] = confidence; // Image is colorful, use normal confidence
} else {
conf2[i] = confidence_square; // Image is not colorful, use squared confidence
}
}
// Copy confidences to subset
for (int i = 0; i < subN; i++) {
conf1[i] = conf2[i];
}
std::cout << "Built confidence array in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Find duplicates
std::cout << "Finding duplicates..." << std::endl;
time = std::chrono::steady_clock::now();
hipError_t cudaStatus; // CUDA Status variable
int result_count = 0; // Track number of results
if (cuda) {
// With CUDA
cudaStatus = findDupes(data1, data2, conf1, conf2, ids1, ids2, pairs, &result_count, subN, N, max_results);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel failed!");
return 1;
}
} else {
// Sequentially
for (int i = 0; i < subN; i++) {
for (int j = 0; j < N; j++) {
double d = 0;
for (int k = 0; k < 128; k++) {
d += fabs(data1[i * 128 + k] - data2[j * 128 + k]);
}
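// d is now the L1 distance between the two histograms; with four 32-bin channels
// that presumably each sum to 1, it can be at most 8, so the next line maps it
// onto a similarity score in [0, 1].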
d = 1 - (d / 8);
if (d > fmaxf(conf1[i], conf2[j])) { // Use highest confidence value of the two histograms
Pair p;
p.similarity = (float) d;
p.id1 = ids1[i];
p.id2 = ids2[j];
if (p.id1 != p.id2) {
pairs.push_back(p);
result_count++;
}
}
}
}
}
std::cout << "Found duplicates in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Print some results
std::cout << "Found pairs: " << result_count << std::endl;
std::cout << "Example results:" << std::endl;
for (int i = 0; i < __min(result_count, 10); i++) {
std::cout << "\t" << pairs[i].id1 << " - " << pairs[i].id2 << ":\t\t" << pairs[i].similarity << std::endl;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
// Delete arrays
delete[] data1;
delete[] data2;
delete[] conf1;
delete[] conf2;
delete[] ids1;
delete[] ids2;
return 0;
}
hipError_t findDupes(const float* data1, const float* data2, const float* conf1, const float* conf2, const int* ids1, const int* ids2, std::vector<Pair>& pairs, int* result_count, const int N1, const int N2, const int max_results) {
float* d_data1; // Data device pointer
float* d_data2;
float* d_confidence1; // Confidence device pointer
float* d_confidence2;
int* d_ids1;
int* d_ids2;
int* d_results_id1;
int* d_results_id2;
float* d_results_similarity;
int* d_result_count; // Result count device pointer
hipError_t cudaStatus; // CUDA error
std::chrono::steady_clock::time_point time; // Time tracking
int dN = N1; // Padded device N to match block size
if (N1 % 64 != 0) {
dN = (int) ceil((double) N1 / 64) * 64;
}
std::cout << "Adjusted N1: " << dN << std::endl;
// Choose which GPU to run on, change this on a multi-GPU system
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed!");
goto Error;
}
// Allocate GPU buffers
std::cout << "Allocating GPU memory..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = hipMalloc((void**) &d_data1, sizeof(float) * 128 * dN); // Allocate memory for histogram data
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_data2, sizeof(float) * 128 * N2); // Allocate memory for histogram data
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_confidence1, sizeof(float) * dN); // Allocate memory for confidence array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_confidence2, sizeof(float) * N2); // Allocate memory for confidence array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_ids1, sizeof(int) * dN); // Allocate memory for ids array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_ids2, sizeof(int) * N2); // Allocate memory for ids array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &d_results_id1, sizeof(int) * max_results); // Allocate memory for results
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_results_id2, sizeof(int) * max_results); // Allocate memory for results
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&d_results_similarity, sizeof(float) * max_results); // Allocate memory for results
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**) &d_result_count, sizeof(int)); // Allocate single int for result count
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
std::cout << "Allocated GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Copy input data from host memory to GPU buffers
std::cout << "Copying data to device..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = hipMemcpy(d_data1, data1, sizeof(float) * 128 * N1, hipMemcpyHostToDevice); // Copy histogram data to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_data2, data2, sizeof(float) * 128 * N2, hipMemcpyHostToDevice); // Copy histogram data to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_confidence1, conf1, sizeof(float) * N1, hipMemcpyHostToDevice); // Copy confidence array to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_confidence2, conf2, sizeof(float) * N2, hipMemcpyHostToDevice); // Copy confidence array to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
if (dN > N1) {
// Copy padded data to device at end of confidence array
float* temp_conf = new float[dN - N1]; // Temp array of padded confidence values
for (int i = 0; i < dN - N1; i++) temp_conf[i] = 2; // Impossible confidence
cudaStatus = hipMemcpy(d_confidence1 + N1, temp_conf, sizeof(float) * (dN - N1), hipMemcpyHostToDevice); // Copy padded confidence values to device
delete[] temp_conf; // Delete temp array
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
}
cudaStatus = hipMemcpy(d_ids1, ids1, sizeof(int) * N1, hipMemcpyHostToDevice); // Copy ids array to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(d_ids2, ids2, sizeof(int) * N2, hipMemcpyHostToDevice); // Copy ids array to device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
std::cout << "Copied data to GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Launch a kernel on the GPU
std::cout << "Launching kernel..." << std::endl;
time = std::chrono::steady_clock::now();
histDupeKernel KERNEL_ARGS((int) ceil((double) N1 / 64), 64) (d_data1, d_data2, d_confidence1, d_confidence2, d_ids1, d_ids2, d_results_id1, d_results_id2, d_results_similarity, d_result_count, N1, N2, max_results); // Launch CUDA kernel
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kernel!\n", cudaStatus);
goto Error;
}
std::cout << "Ran GPU kernel in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Copy output from GPU buffer to host memory.
std::cout << "Copying results from device..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = hipMemcpy((void*) result_count, d_result_count, sizeof(int), hipMemcpyDeviceToHost); // Copy result count from device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
result_count[0] = __min(result_count[0], max_results); // Clamp result_count to max_results
// Read result pairs into buffer
{
int* temp_id1 = new int[result_count[0]];
int* temp_id2 = new int[result_count[0]];
float* temp_similarity = new float[result_count[0]];
cudaStatus = hipMemcpy((void*) temp_id1, d_results_id1, sizeof(int) * result_count[0], hipMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy((void*)temp_id2, d_results_id2, sizeof(int) * result_count[0], hipMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy((void*)temp_similarity, d_results_similarity, sizeof(float) * result_count[0], hipMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Only keep pairs that are unique (pairs are commutative)
for (int i = 0; i < result_count[0]; i++) {
bool found = false;
for (const Pair p2 : pairs) {
if ((temp_id1[i] == p2.id1 && temp_id2[i] == p2.id2) || (temp_id1[i] == p2.id2 && temp_id2[i] == p2.id1)) {
found = true;
break;
}
}
if (!found) {
Pair pair;
pair.id1 = temp_id1[i];
pair.id2 = temp_id2[i];
pair.similarity = temp_similarity[i];
pairs.push_back(pair); // Only keep pair if it is unique
}
}
delete[] temp_id1; // Delete temp results buffer
delete[] temp_id2; // Delete temp results buffer
delete[] temp_similarity; // Delete temp results buffer
}
result_count[0] = (int) pairs.size(); // Reset result_count to count of final result set
std::cout << "Retrieved results from GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
Error:
// Free cuda memory
std::cout << "Freeing GPU memory..." << std::endl;
time = std::chrono::steady_clock::now();
hipFree(d_data1);
hipFree(d_data2);
hipFree(d_confidence1);
hipFree(d_confidence2);
hipFree(d_ids1);
hipFree(d_ids2);
hipFree(d_results_id1);
hipFree(d_results_id2);
hipFree(d_results_similarity);
hipFree(d_result_count);
std::cout << "Freed GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
return cudaStatus;
}
| fa188a71580de9ba32bae1991053502f3ae3513a.cu |
#include "histdupe.h"
#include <stdio.h>
#include <string>
#include <iostream>
#include <vector>
#include <chrono>
/*
Utility method for launching a CUDA kernel. Performes all the nasty checks, allocation, and copying of data.
*/
cudaError_t findDupes(const float*, const float*, const float*, const float*, const int*, const int*, std::vector<Pair>&, int*, const int, const int, const int);
int main(int argc, char* argv[]) {
if (argc < 2) {
fprintf(stderr, "Too few arguments. Expected 1\n\nUsage: %s DATA_PATH\n", argv[0]);
return 1;
}
// Initialize variables
// Maximum number of results to return. Only applies to CUDA launches
int max_results = 1000000;
// Base confidence value for similar pairs
float confidence = 0.95f;
// Maximum color variance per histogram. Used to determine if an image is black-and-white or colorful
float color_variance = 0.25f;
// Number of histograms in the dataset
int N = 50000;
// N subset
int subN = 25000;
// Use CUDA to find similar pair
bool cuda = true;
// Clock used for timing
std::chrono::steady_clock::time_point time;
// Print some diagnostics
std::cout << "Datafile Path: " << argv[1] << std::endl;
std::cout << "N: " << N << std::endl;
std::cout << "Max Results: " << max_results << std::endl;
std::cout << "Confidence: " << confidence << std::endl;
std::cout << "Color Variance: " << color_variance << std::endl;
// Allocate some arrays
std::cout << "Allocating memory..." << std::endl;
time = std::chrono::steady_clock::now();
int* ids1 = new int[subN]; // Mapping of actual index to ID of histogram
int* ids2 = new int[N];
float* data1 = new float[128 * subN];
float* data2 = new float[128 * N]; // End-to-end array of all histograms. Each histogram consists of 128 floats
float* conf1 = new float[subN];
float* conf2 = new float[N]; // Confidence array; allows using stricter confidence for black and white images
std::vector<Pair> pairs; // Vector of similar pairs (to be populated)
std::cout << "Allocated memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Read test data from file
std::cout << "Reading data from file: " << argv[1] << "..." << std::endl;
time = std::chrono::steady_clock::now();
FILE* file; // Data file
file = fopen(argv[1], "r"); // Open data file to read
for (int i = 0; i < N; i++) {
fscanf(file, "%d", &ids2[i]); // Read first int as id of histogram
for (int j = 0; j < 128; j++) { // Read 128 floats as histogram elements
fscanf(file, "%f", &data2[i * 128 + j]);
}
}
fclose(file); // Close data file
// Copy data and ids for subset
for (int i = 0; i < subN; i++) {
ids1[i] = ids2[i];
for (int j = 0; j < 128; j++) {
data1[i * 128 + j] = data2[i * 128 + j];
}
}
std::cout << "Read data in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Build confidence array
std::cout << "Building confidence array..." << std::endl;
time = std::chrono::steady_clock::now();
float confidence_square = 1 - (1 - confidence) * (1 - confidence); // Squared confidence for comparing black and white images.
// Generate confidence array
for (int i = 0; i < N; i++) {
float d = 0;
// Compute sum of color variance across histogram
for (int k = 0; k < 32; k++) {
// Ignore alpha values (first 32 floats)
float r = data2[i * 128 + k + 32];
float g = data2[i * 128 + k + 64];
float b = data2[i * 128 + k + 96];
d += __max(__max(r, g), b) - __min(__min(r, g), b);
}
if (d > color_variance) {
conf2[i] = confidence; // Image is colorful, use normal confidence
} else {
conf2[i] = confidence_square; // Image is not colorful, use squared confidence
}
}
// Copy confidences to subset
for (int i = 0; i < subN; i++) {
conf1[i] = conf2[i];
}
std::cout << "Built confidence array in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Find duplicates
std::cout << "Finding duplicates..." << std::endl;
time = std::chrono::steady_clock::now();
cudaError_t cudaStatus; // CUDA Status variable
int result_count = 0; // Track number of results
if (cuda) {
// With CUDA
cudaStatus = findDupes(data1, data2, conf1, conf2, ids1, ids2, pairs, &result_count, subN, N, max_results);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel failed!");
return 1;
}
} else {
// Sequentially
for (int i = 0; i < subN; i++) {
for (int j = 0; j < N; j++) {
double d = 0;
for (int k = 0; k < 128; k++) {
d += fabs(data1[i * 128 + k] - data2[j * 128 + k]);
}
d = 1 - (d / 8);
if (d > fmaxf(conf1[i], conf2[j])) { // Use highest confidence value of the two histograms
Pair p;
p.similarity = (float) d;
p.id1 = ids1[i];
p.id2 = ids2[j];
if (p.id1 != p.id2) {
pairs.push_back(p);
result_count++;
}
}
}
}
}
std::cout << "Found duplicates in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Print some results
std::cout << "Found pairs: " << result_count << std::endl;
std::cout << "Example results:" << std::endl;
for (int i = 0; i < __min(result_count, 10); i++) {
std::cout << "\t" << pairs[i].id1 << " - " << pairs[i].id2 << ":\t\t" << pairs[i].similarity << std::endl;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
// Delete arrays
delete[] data1;
delete[] data2;
delete[] conf1;
delete[] conf2;
delete[] ids1;
delete[] ids2;
return 0;
}
cudaError_t findDupes(const float* data1, const float* data2, const float* conf1, const float* conf2, const int* ids1, const int* ids2, std::vector<Pair>& pairs, int* result_count, const int N1, const int N2, const int max_results) {
float* d_data1; // Data device pointer
float* d_data2;
float* d_confidence1; // Confidence device pointer
float* d_confidence2;
int* d_ids1;
int* d_ids2;
int* d_results_id1;
int* d_results_id2;
float* d_results_similarity;
int* d_result_count; // Result count device pointer
cudaError_t cudaStatus; // CUDA error
std::chrono::steady_clock::time_point time; // Time tracking
int dN = N1; // Padded device N to match block size
if (N1 % 64 != 0) {
dN = (int) ceil((double) N1 / 64) * 64;
}
std::cout << "Adjusted N1: " << dN << std::endl;
// Choose which GPU to run on, change this on a multi-GPU system
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!");
goto Error;
}
// Allocate GPU buffers
std::cout << "Allocating GPU memory..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = cudaMalloc((void**) &d_data1, sizeof(float) * 128 * dN); // Allocate memory for histogram data
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_data2, sizeof(float) * 128 * N2); // Allocate memory for histogram data
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_confidence1, sizeof(float) * dN); // Allocate memory for confidence array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_confidence2, sizeof(float) * N2); // Allocate memory for confidence array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_ids1, sizeof(int) * dN); // Allocate memory for ids array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_ids2, sizeof(int) * N2); // Allocate memory for ids array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &d_results_id1, sizeof(int) * max_results); // Allocate memory for results
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_results_id2, sizeof(int) * max_results); // Allocate memory for results
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&d_results_similarity, sizeof(float) * max_results); // Allocate memory for results
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**) &d_result_count, sizeof(int)); // Allocate single int for result count
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
std::cout << "Allocated GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Copy input data from host memory to GPU buffers
std::cout << "Copying data to device..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = cudaMemcpy(d_data1, data1, sizeof(float) * 128 * N1, cudaMemcpyHostToDevice); // Copy histogram data to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_data2, data2, sizeof(float) * 128 * N2, cudaMemcpyHostToDevice); // Copy histogram data to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_confidence1, conf1, sizeof(float) * N1, cudaMemcpyHostToDevice); // Copy confidence array to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_confidence2, conf2, sizeof(float) * N2, cudaMemcpyHostToDevice); // Copy confidence array to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
if (dN > N1) {
// Copy padded data to device at end of confidence array
float* temp_conf = new float[dN - N1]; // Temp array of padded confidence values
for (int i = 0; i < dN - N1; i++) temp_conf[i] = 2; // Impossible confidence
cudaStatus = cudaMemcpy(d_confidence1 + N1, temp_conf, sizeof(float) * (dN - N1), cudaMemcpyHostToDevice); // Copy padded confidence values to device
delete[] temp_conf; // Delete temp array
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
}
cudaStatus = cudaMemcpy(d_ids1, ids1, sizeof(int) * N1, cudaMemcpyHostToDevice); // Copy ids array to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(d_ids2, ids2, sizeof(int) * N2, cudaMemcpyHostToDevice); // Copy ids array to device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
std::cout << "Copied data to GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Launch a kernel on the GPU
std::cout << "Launching kernel..." << std::endl;
time = std::chrono::steady_clock::now();
histDupeKernel KERNEL_ARGS((int) ceil((double) N1 / 64), 64) (d_data1, d_data2, d_confidence1, d_confidence2, d_ids1, d_ids2, d_results_id1, d_results_id2, d_results_similarity, d_result_count, N1, N2, max_results); // Launch CUDA kernel
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernel!\n", cudaStatus);
goto Error;
}
std::cout << "Ran GPU kernel in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
// Copy output from GPU buffer to host memory.
std::cout << "Copying results from device..." << std::endl;
time = std::chrono::steady_clock::now();
cudaStatus = cudaMemcpy((void*) result_count, d_result_count, sizeof(int), cudaMemcpyDeviceToHost); // Copy result count from device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
result_count[0] = __min(result_count[0], max_results); // Clamp result_count to max_results
// Read result pairs into buffer
{
int* temp_id1 = new int[result_count[0]];
int* temp_id2 = new int[result_count[0]];
float* temp_similarity = new float[result_count[0]];
cudaStatus = cudaMemcpy((void*) temp_id1, d_results_id1, sizeof(int) * result_count[0], cudaMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy((void*)temp_id2, d_results_id2, sizeof(int) * result_count[0], cudaMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy((void*)temp_similarity, d_results_similarity, sizeof(float) * result_count[0], cudaMemcpyDeviceToHost); // Copy results from device
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Only keep pairs that are unique (pairs are commutative)
for (int i = 0; i < result_count[0]; i++) {
bool found = false;
for (const Pair p2 : pairs) {
if ((temp_id1[i] == p2.id1 && temp_id2[i] == p2.id2) || (temp_id1[i] == p2.id2 && temp_id2[i] == p2.id1)) {
found = true;
break;
}
}
if (!found) {
Pair pair;
pair.id1 = temp_id1[i];
pair.id2 = temp_id2[i];
pair.similarity = temp_similarity[i];
pairs.push_back(pair); // Only keep pair if it is unique
}
}
delete[] temp_id1; // Delete temp results buffer
delete[] temp_id2; // Delete temp results buffer
delete[] temp_similarity; // Delete temp results buffer
}
result_count[0] = (int) pairs.size(); // Reset result_count to count of final result set
std::cout << "Retrieved results from GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
Error:
// Free cuda memory
std::cout << "Freeing GPU memory..." << std::endl;
time = std::chrono::steady_clock::now();
cudaFree(d_data1);
cudaFree(d_data2);
cudaFree(d_confidence1);
cudaFree(d_confidence2);
cudaFree(d_ids1);
cudaFree(d_ids2);
cudaFree(d_results_id1);
cudaFree(d_results_id2);
cudaFree(d_results_similarity);
cudaFree(d_result_count);
std::cout << "Freed GPU memory in: " << std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - time).count() << " ms" << std::endl;
return cudaStatus;
}
|
92ce38bc089d426255f3573fde8b3af07660642b.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/VolumetricFullConvolution.cu"
#include "THHGenerateFloatTypes.h"
| 92ce38bc089d426255f3573fde8b3af07660642b.cu | #include "THCUNN.h"
#include "common.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/VolumetricFullConvolution.cu"
#include "THCGenerateFloatTypes.h"
|
972386fed3109321081395ed2e0aa4b3acbcc3f2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <VaryTypeAndOperator.h>
#include <PrintOutput.h>
/**
* CUDA Kernel Device code
*
 * Builds a per-thread 32-bit bitmap whose bits record whether sampled elements of A
 * exceed the given condition; each thread writes its bitmap to C. A has numElements elements.
*/
__global__ void
vectorAdd(const int *A, int *C, int condition, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int d_index=blockDim.x * blockIdx.x + threadIdx.x;
int tempBitMap=0;
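// Pack 32 threshold tests into one int: each iteration shifts the bitmap left and ORs in (A[t_index] > condition).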
for (int j=0 ; j< 32; j++){
tempBitMap= tempBitMap <<1;
//int t_index=i+ blockDim.x *gridDim.x*j ;
int t_index=i*j ;
if(t_index< numElements){
if(A[t_index]> condition){
tempBitMap = tempBitMap | 1;
}
}
}
C[d_index]=tempBitMap;
}
/**
* Host main routine
*/
int
main(int argc, char * argv[])
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
int numElements = 50000;
if (checkCmdLineFlag(argc, (const char **)argv, "nIter"))
{
nIter = getCmdLineArgumentInt(argc, (const char **)argv, "nIter");
}else{
#ifndef METRIC_RUN_ONLY_ONCE
nIter = 30;
#else
nIter = 1;
#endif
}
if (checkCmdLineFlag(argc, (const char **)argv, "Num"))
{
numElements = getCmdLineArgumentInt(argc, (const char **)argv, "Num");
}
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(int);
printf("[Vector addition of %d elements]\n", numElements);
noWarmUp=checkCmdLineFlag(argc, (const char **)argv, "NoWarmUp");
// Allocate the host input vector A
int *h_A = (int *)malloc(size);
// Allocate the host input vector B
//int *h_B = (int *)malloc(size);
// Allocate the host output vector C
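// (the kernel emits one 32-bit bitmap per 32 input elements, so size/32 bytes suffice)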
int *h_C = (int *)malloc(size/32);
// Verify that allocations succeeded
if (h_A == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = (int)rand()/RAND_MAX;
//h_B[i] = (int)rand()/RAND_MAX;
}
// Allocate the device input vector A
int *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
/*
int *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
// Allocate the device output vector C
int *d_C = NULL;
err = hipMalloc((void **)&d_C, size/32);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements/32 + threadsPerBlock - 1) / (threadsPerBlock);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
//--------------profiling
float msecTotal = 0.0f;
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
int condition =10;
for(int t=0; t< nIter; t++)
{
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_C, condition , numElements);
}
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
err = hipGetLastError();
double msec = msecTotal / nIter;
sizeInGBytes= (sizeof(int)*numElements*2)* 1.0e-9;
if(msec!=0){
gigaProcessedInSec=( sizeInGBytes / (msec / 1000.0f));
}
outPutSizeInGBytes=sizeof(int)*numElements*1.0e-9;
timeInMsec=msec;
printOutput();
printf("nIter %d\n", nIter);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size/32, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
// free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 972386fed3109321081395ed2e0aa4b3acbcc3f2.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <VaryTypeAndOperator.h>
#include <PrintOutput.h>
/**
* CUDA Kernel Device code
*
 * Builds a per-thread 32-bit bitmap whose bits record whether sampled elements of A
 * exceed the given condition; each thread writes its bitmap to C. A has numElements elements.
*/
__global__ void
vectorAdd(const int *A, int *C, int condition, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int d_index=blockDim.x * blockIdx.x + threadIdx.x;
int tempBitMap=0;
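// Pack 32 threshold tests into one int: each iteration shifts the bitmap left and ORs in (A[t_index] > condition).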
for (int j=0 ; j< 32; j++){
tempBitMap= tempBitMap <<1;
//int t_index=i+ blockDim.x *gridDim.x*j ;
int t_index=i*j ;
if(t_index< numElements){
if(A[t_index]> condition){
tempBitMap = tempBitMap | 1;
}
}
}
C[d_index]=tempBitMap;
}
/**
* Host main routine
*/
int
main(int argc, char * argv[])
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int numElements = 50000;
if (checkCmdLineFlag(argc, (const char **)argv, "nIter"))
{
nIter = getCmdLineArgumentInt(argc, (const char **)argv, "nIter");
}else{
#ifndef METRIC_RUN_ONLY_ONCE
nIter = 30;
#else
nIter = 1;
#endif
}
if (checkCmdLineFlag(argc, (const char **)argv, "Num"))
{
numElements = getCmdLineArgumentInt(argc, (const char **)argv, "Num");
}
// Print the vector length to be used, and compute its size
size_t size = numElements * sizeof(int);
printf("[Vector addition of %d elements]\n", numElements);
noWarmUp=checkCmdLineFlag(argc, (const char **)argv, "NoWarmUp");
// Allocate the host input vector A
int *h_A = (int *)malloc(size);
// Allocate the host input vector B
//int *h_B = (int *)malloc(size);
// Allocate the host output vector C
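// (the kernel emits one 32-bit bitmap per 32 input elements, so size/32 bytes suffice)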
int *h_C = (int *)malloc(size/32);
// Verify that allocations succeeded
if (h_A == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = (int)rand()/RAND_MAX;
//h_B[i] = (int)rand()/RAND_MAX;
}
// Allocate the device input vector A
int *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
/*
int *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
// Allocate the device output vector C
int *d_C = NULL;
err = cudaMalloc((void **)&d_C, size/32);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements/32 + threadsPerBlock - 1) / (threadsPerBlock);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
//--------------profiling
float msecTotal = 0.0f;
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
int condition =10;
for(int t=0; t< nIter; t++)
{
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, condition , numElements);
}
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
err = cudaGetLastError();
double msec = msecTotal / nIter;
sizeInGBytes= (sizeof(int)*numElements*2)* 1.0e-9;
if(msec!=0){
gigaProcessedInSec=( sizeInGBytes / (msec / 1000.0f));
}
outPutSizeInGBytes=sizeof(int)*numElements*1.0e-9;
timeInMsec=msec;
printOutput();
printf("nIter %d\n", nIter);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size/32, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
// free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
0204dc36867816baa478ec0bc031065d9a3efbc0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define USE_CUDNN 1
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
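// Empty kernel; launching it into the default (null) stream synchronizes work issued to the per-group streams.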
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = (*top)[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
top_descs_[i], top_data + top_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
Dtype alpha = 1.;
CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha,
bias_desc_, bias_data + bias_offset_ * g,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff);
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
top_descs_[i], top_diff + top_offset_ * g,
bias_desc_, bias_diff + bias_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = (*bottom)[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
filter_desc_, weight_diff + weight_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bottom_descs_[i], bottom_diff + bottom_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_CLASS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 0204dc36867816baa478ec0bc031065d9a3efbc0.cu | #define USE_CUDNN 1
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
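// Empty kernel; launching it into the default (null) stream synchronizes work issued to the per-group streams.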
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = (*top)[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
top_descs_[i], top_data + top_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
Dtype alpha = 1.;
CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha,
bias_desc_, bias_data + bias_offset_ * g,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff);
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
top_descs_[i], top_diff + top_offset_ * g,
bias_desc_, bias_diff + bias_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = (*bottom)[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
filter_desc_, weight_diff + weight_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bottom_descs_[i], bottom_diff + bottom_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_CLASS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
35e3a3f031491410196d09e41e53de68fc0df043.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "needle.h"
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include "needle_kernel.hip"
#include "../benchmark_common.h"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime_nw() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
//int
//main( int argc, char** argv)
int main_nw(hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag)
{
runTest(stream_app, mutexapp, flag);
//return EXIT_SUCCESS;
return 0;
}
/*void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}*/
void runTest( hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
int size;
// the lengths of the two sequences should be able to divided by 16.
// And at current stage max_rows needs to equal max_cols
/*if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}*/
max_rows= 2048;
max_cols= 2048;
penalty = 10;
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
hipMalloc((void**)& referrence_cuda, sizeof(int)*size);
hipMalloc((void**)& matrix_cuda, sizeof(int)*size);
hipMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
hipMemcpyAsync(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice, stream_app);
hipMemcpyAsync(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice, stream_app);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
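// The score matrix is swept in anti-diagonal waves: launch i covers a diagonal of i independent blocks.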
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock),0, stream_app, referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock),0, stream_app, referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
pthread_mutex_unlock (mutexapp);
if(flag)
cutilSafeCall( hipStreamSynchronize(stream_app) );
hipMemcpyAsync(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost, stream_app);
#ifdef TRACE
printf("print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0 && j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
printf("%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
traceback = maximum(nw, w, n);
printf("%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
printf("\n");
#endif
hipFree(referrence_cuda);
hipFree(matrix_cuda);
hipFree(matrix_cuda_out);
}
| 35e3a3f031491410196d09e41e53de68fc0df043.cu | #define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "needle.h"
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include "needle_kernel.cu"
#include "../benchmark_common.h"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime_nw() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
//int
//main( int argc, char** argv)
int main_nw(cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag)
{
runTest(stream_app, mutexapp, flag);
//return EXIT_SUCCESS;
return 0;
}
/*void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}*/
void runTest( cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
int size;
// the lengths of the two sequences should be able to divided by 16.
// And at current stage max_rows needs to equal max_cols
/*if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}*/
max_rows= 2048;
max_cols= 2048;
penalty = 10;
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
cudaMalloc((void**)& referrence_cuda, sizeof(int)*size);
cudaMalloc((void**)& matrix_cuda, sizeof(int)*size);
cudaMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
cudaMemcpyAsync(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice, stream_app);
cudaMemcpyAsync(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice, stream_app);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
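// The score matrix is swept in anti-diagonal waves: launch i covers a diagonal of i independent blocks.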
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_1<<<dimGrid, dimBlock,0, stream_app>>>(referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_2<<<dimGrid, dimBlock,0, stream_app>>>(referrence_cuda, matrix_cuda, matrix_cuda_out
,max_cols, penalty, i, block_width);
}
pthread_mutex_unlock (mutexapp);
if(flag)
cutilSafeCall( cudaStreamSynchronize(stream_app) );
cudaMemcpyAsync(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost, stream_app);
#ifdef TRACE
printf("print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0 && j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
printf("%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
traceback = maximum(nw, w, n);
printf("%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
printf("\n");
#endif
cudaFree(referrence_cuda);
cudaFree(matrix_cuda);
cudaFree(matrix_cuda_out);
}
|
329afa62f8b7b230abf992c552f20a2f808d5150.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void kernel(void){
}
int main(int argc, char** argv){
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf("Hello World!\n");
return 0;
}
| 329afa62f8b7b230abf992c552f20a2f808d5150.cu | #include <stdio.h>
__global__ void kernel(void){
}
int main(int argc, char** argv){
kernel<<<1,1>>>();
printf("Hello World!\n");
return 0;
}
|
57fd3c9e9460e2f006e21ba6f0b0ea7fb3927db4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.h"
// Warp reduction functions
#if __CUDA_ARCH__ >= 300
__device__ inline float warpReduce(float value, int laneID){
// Use XOR mode to perform butterfly reduction
#pragma unroll
for (int i=16; i>=1; i/=2)
value += __shfl_xor(value, i, 32);
return value;
}
#else
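// Fallback for devices without warp shuffle: warp-synchronous tree reduction through volatile shared memory.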
__device__ inline float warpReduce(float value, int laneID){
volatile __shared__ float values[1024];
values[threadIdx.x] = 0.0;
values[threadIdx.x] = value;
if(laneID < 16){
for(int i=16; i>=1; i/=2){
values[threadIdx.x] += values[threadIdx.x+i];
}
}
return values[threadIdx.x];
}
#endif
void callSquareSumVector(float *srcMatrix,
float *sqSumVector,
int M,
int K,
int maxGridSize
) {
dim3 gridSize;
dim3 blockSize;
gridSize.x = 1;
gridSize.y = min(M,maxGridSize);
blockSize.x = min(1024,max(32,(K/32)*32));
blockSize.y = 1;
hipLaunchKernelGGL(( calcSquareSumVector), dim3(gridSize),dim3(blockSize), 0, 0, srcMatrix,sqSumVector,M,K);
}
// Square-sum reduction of matrix rows kernel
__global__ void calcSquareSumVector(float *srcMatrix,
float *sqSumVector,
int M,
int K){
// Shared data
volatile __shared__ float sdata[32];
// Calculate thread index and stride
int laneId = threadIdx.x & 0x1f;
int icol = threadIdx.x;
int stride = blockDim.x;
int warpId = threadIdx.x/32;
// Initialize shared data
if(warpId == 0)
sdata[laneId] = 0;
__syncthreads();
// Split rows amongst thread blocks
for(int row = blockIdx.y;
row < M;
row += gridDim.y){
// Thread-Local sum
float mySqSum = 0.0;
// Strided reduction of squared values across columns
for(int col = icol;
col < K + blockDim.x;
col += stride){
// Square the assigned matrix cell
float val = (col >= K) ? 0.0 : srcMatrix[K*row + col];
float sqVal = val*val;
// Add to thread-local sum
mySqSum += sqVal;
}
// Warp-level reduction with butterfly shuffles
float warpSqSum = warpReduce(mySqSum,laneId);
// Store warp-local square-sum
if(laneId == 0){
sdata[warpId] = warpSqSum;
}
__syncthreads();
// Lowest warp finishes off the work
if(warpId == 0){
// Read warp-local square-sums
mySqSum = sdata[laneId];
//printf("===%3d %3d %3d %5.2f\n", row, warpId, laneId, mySqSum);
// Add to block-local square sums
float blkSqSum = warpReduce(mySqSum,laneId);
// Store result
if(laneId == 0){
sqSumVector[row] = blkSqSum;
}
}
}
}
| 57fd3c9e9460e2f006e21ba6f0b0ea7fb3927db4.cu | #include "main.h"
// Warp reduction functions
#if __CUDA_ARCH__ >= 300
__device__ inline float warpReduce(float value, int laneID){
// Use XOR mode to perform butterfly reduction
#pragma unroll
for (int i=16; i>=1; i/=2)
value += __shfl_xor(value, i, 32);
return value;
}
#else
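// Fallback for devices without warp shuffle (pre-Kepler): tree reduction through shared memory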
__device__ inline float warpReduce(float value, int laneID){
volatile __shared__ float values[1024];
values[threadIdx.x] = 0.0;
values[threadIdx.x] = value;
if(laneID < 16){
for(int i=16; i>=1; i/=2){
values[threadIdx.x] += values[threadIdx.x+i];
}
}
return values[threadIdx.x];
}
#endif
void callSquareSumVector(float *srcMatrix,
float *sqSumVector,
int M,
int K,
int maxGridSize
) {
dim3 gridSize;
dim3 blockSize;
gridSize.x = 1;
gridSize.y = min(M,maxGridSize);
blockSize.x = min(1024,max(32,(K/32)*32));
blockSize.y = 1;
calcSquareSumVector<<<gridSize,blockSize>>>(srcMatrix,sqSumVector,M,K);
}
// Square-sum reduction of matrix rows kernel
__global__ void calcSquareSumVector(float *srcMatrix,
float *sqSumVector,
int M,
int K){
// Shared data
volatile __shared__ float sdata[32];
// Calculate thread index and stride
int laneId = threadIdx.x & 0x1f;
int icol = threadIdx.x;
int stride = blockDim.x;
int warpId = threadIdx.x/32;
// Initialize shared data
if(warpId == 0)
sdata[laneId] = 0;
__syncthreads();
// Split rows amongst thread blocks
for(int row = blockIdx.y;
row < M;
row += gridDim.y){
// Thread-Local sum
float mySqSum = 0.0;
// Strided reduction of squared values across columns
for(int col = icol;
col < K + blockDim.x;
col += stride){
// Square the assigned matrix cell
float val = (col >= K) ? 0.0 : srcMatrix[K*row + col];
float sqVal = val*val;
// Add to thread-local sum
mySqSum += sqVal;
}
// Warp-level reduction with butterfly shuffles
float warpSqSum = warpReduce(mySqSum,laneId);
// Store warp-local square-sum
if(laneId == 0){
sdata[warpId] = warpSqSum;
}
__syncthreads();
// Lowest work finishes off work
if(warpId == 0){
// Read warp-local square-sums
mySqSum = sdata[laneId];
//printf("===%3d %3d %3d %5.2f\n", row, warpId, laneId, mySqSum);
// Add to block-local square sums
float blkSqSum = warpReduce(mySqSum,laneId);
// Store result
if(laneId == 0){
sqSumVector[row] = blkSqSum;
}
}
}
}
|
ff6621614df1dd683c2fa378fb55c18615708cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void initialize_rho(float* rho, int size_c, int nc) {
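// One thread per (i, c) entry: fill the nc x size_c field rho with 0.5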
int i = blockIdx.x*blockDim.x + threadIdx.x;
int c = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size_c && c < nc) {
rho[c*(size_c)+i] = 0.5f;
}
} | ff6621614df1dd683c2fa378fb55c18615708cc2.cu | #include "includes.h"
__global__ void initialize_rho(float* rho, int size_c, int nc) {
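// One thread per (i, c) entry: fill the nc x size_c field rho with 0.5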
int i = blockIdx.x*blockDim.x + threadIdx.x;
int c = blockIdx.y*blockDim.y + threadIdx.y;
if (i < size_c && c < nc) {
rho[c*(size_c)+i] = 0.5f;
}
} |
31da64ff76148f427bc6a60a5f28df72bcf7042a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_3.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array of cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
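//Rush-Larsen exponential integration: g_new = g_inf - (g_inf - g)*exp(-dt/tau)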
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.5383636643555,0.000359007183612285,0.000154135859579797,0.000217532604523131,0.265156052763393,0.186639850277223,0.149365610424309,3.43320580539409,0.0166941723782826,1.45123160724562,1094.13527370174,0.000494385096732911,0.269171393030809,0.0183256017779276,0.00468024174172971,1.50869252254344e-05};
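// Calibrated parameter set; the assignments below override the default conductances declared above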
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 31da64ff76148f427bc6a60a5f28df72bcf7042a.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_3.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array of cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6407442866583,0.00127024730863006,0.781477837871060,0.781226285372551,0.000173058844459830,0.485844316142820,0.00292517461971129,0.999998371825952,1.91031873007277e-08,1.87288135192733e-05,0.999773522474666,1.00766286802375,0.999999451356628,3.16576129409975e-05,0.737961690357158,10.2441215797546,139.210514590526};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
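//Rush-Larsen exponential integration: g_new = g_inf - (g_inf - g)*exp(-dt/tau)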
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.5383636643555,0.000359007183612285,0.000154135859579797,0.000217532604523131,0.265156052763393,0.186639850277223,0.149365610424309,3.43320580539409,0.0166941723782826,1.45123160724562,1094.13527370174,0.000494385096732911,0.269171393030809,0.0183256017779276,0.00468024174172971,1.50869252254344e-05};
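// Calibrated parameter set; the assignments below override the default conductances declared above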
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
32d4281042f66607bbc2bfc4e66554600fb886a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Copyright (c) 2012, Mikhail Sirotenko <[email protected]>
//All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
//ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
//WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
//DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
//(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
//ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
//(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
//SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../precomp.hpp"
namespace cudacnn
{
template class CLayer<TensorGPU, float, TansigMod<float> >;
template class CLayer<TensorGPU, float, Tansig<float> >;
template class CLayer<TensorGPU, float, Purelin<float> >;
template class CLayer<TensorGPU, double, TansigMod<double> >;
template class CLayer<TensorGPU, double, Tansig<double> >;
template class CLayer<TensorGPU, double, Purelin<double> >;
#ifdef HAVE_CUDA
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::AverageHessian()
{
if(this->num_hessian_accums_) {
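// Average the accumulated second derivatives over the number of accumulation passes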
dim3 threads(min(512,this->d2e_dw2_.num_elements()),1,1);
dim3 blocks(iDivUp(this->d2e_dw2_.num_elements(),512),1,1);
hipLaunchKernelGGL(( Average<T>), dim3(blocks), dim3(threads), 0, 0, this->d2e_dw2_, this->num_hessian_accums_);
threads = dim3(min(512,this->d2e_db2_.num_elements()),1,1);
blocks = dim3(iDivUp(this->d2e_db2_.num_elements(),512),1,1);
hipLaunchKernelGGL(( Average<T>), dim3(blocks), dim3(threads), 0, 0, this->d2e_db2_, this->num_hessian_accums_);
this->num_hessian_accums_ = 0;
}
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::AdaptWeights(T tau, bool use_hessian, T mu)
{
dim3 threads(MAX_THREADS);
if(use_hessian){
dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS));
hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->weights(), tau, mu, this->de_dw(), this->d2e_dw2());
blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS));
hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->biases(), tau, mu, this->de_db(), this->d2e_db2());
}else{
dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS));
hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->weights(), tau, this->de_dw());
blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS));
hipLaunchKernelGGL(( AdaptWeightsKernel<T>), dim3(threads),dim3(blocks), 0, 0, this->biases(), tau, this->de_db());
}
}
//Simple variant. Without extra threads for maximum occupancy
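//Mapping: one block per output pixel (blockIdx.x enumerates y*width+x) and per output map
//(blockIdx.y = km); the block's threads cover the kernel window, multiply each kernel tap by
//the corresponding input pixel and sum the products with SmemReduce in shared memory. The
//connection map selects which input maps contribute to a given output map.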
template <class T, class TF, int nthreads>
__global__ void Conv2ValidKernel(const TensorDev3<T> inputs, const TensorDev4<T> kernels, const TensorDev3<T> biases,
const TensorDev2<int> conn_map, TensorDev3<T> outputs)
{
//__shared__ T smem[nthreads*2];
T* smem = SharedMemory<T>();
T* kernels_buf = smem;
T* sum_buf = smem + nthreads;
int kx = threadIdx.x;
int ky = threadIdx.y;
//int km = threadIdx.z;
//output coords
int km = blockIdx.y;
int y = blockIdx.x / outputs.w;
int x = blockIdx.x % outputs.w;
//int tid = threadIdx.z*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int tid = threadIdx.y*blockDim.x + threadIdx.x;
kernels_buf[tid] = 0;
sum_buf[tid] = 0;
T out = 0;
if(kx < kernels.w && ky < kernels.h) {
//Loop for all inputs
for(int i = 0; i < inputs.d; ++i) {
//Load kernel into smem
kernels_buf[tid] = kernels(kx,ky,i,km);
__syncthreads();
sum_buf[tid] = kernels_buf[tid] * inputs(x + kx, y + ky, i);
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
//Check connection
if(tid == 0){
out += conn_map(i, km) > 0 ? vsmem[tid] : 0;
}
}
}
if(tid == 0){
TF tf;
outputs(x, y, km) = tf(out + biases[km]);
}
}
//TODO: remove the limitation of a 32x32 maximum kernel size
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::Propagate(const TensorGPU<T>& layer_input )
{
//TODO: parametrize max threads number
assert(this->weights().w() * this->weights().h() <= MAX_THREADS);
assert(this->con_map().w() == layer_input.d());
assert(this->con_map().h() == this->out().d());
assert(this->weights().d() == this->con_map().w());
assert(this->weights().d2() == this->con_map().h());
dim3 threads(iRoundUpPow2(this->weights().w()),iRoundUpPow2(this->weights().h()),1);
dim3 blocks(this->out().w()*this->out().h(),this->out().d(), 1);
int nthreads = threads.x*threads.y;
size_t smem_size = ::max(nthreads*2*sizeof(T), 64*sizeof(T));
switch(nthreads)
{
case 1 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 1 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 2 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 2 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 4 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 4 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 8 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 8 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 16 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 16 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 32 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 32 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 64 :hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 64 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 128:hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 128 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 256:hipLaunchKernelGGL(( Conv2ValidKernel<T, TF, 256 >), dim3(blocks), dim3(threads), smem_size, 0, layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
default:
throw std::runtime_error("Incorrect threads number in Propagate");
}
cutilCheckMsg("Failed to Propagate in CLayerCuda");
}
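//Backpropagation of dE/dx to the previous layer: one block per pixel (ix,iy) of input map im,
//with threads covering the kernel window. Each thread forms dE/dy = f'(y)*dE/dx (squared terms
//for the Hessian pass) at the output pixel reached through its tap, multiplies it by the
//corresponding weight (or squared weight), and the shared-memory reduction accumulates the
//sum into de_dx_prev.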
template <class T, int nthreads, class TF, bool hessian>
__global__ void BackpropConvKernel(const TensorDev3<T> dedx, const TensorDev4<T> weights, const TensorDev3<T> outs,
const TensorDev2<int> conn_map, unsigned out_idx, TensorDev3<T> de_dx_prev)
{
T* sum_buf = SharedMemory<T>();
int kx = threadIdx.x % weights.w;
int ky = threadIdx.x / weights.w;
//coords in de_dx_prev, i.e. the layer input, which is larger than the layer output
int ix = blockIdx.x % de_dx_prev.w;
int iy = blockIdx.x / de_dx_prev.w;
int im = blockIdx.y;
int kw = weights.w;
int kh = weights.h;
int y = iy - ky;
int x = ix - kx;
if(conn_map(im, out_idx) == 0) return;
int tid = threadIdx.x;
sum_buf[tid] = 0;
__syncthreads();
if(kx < kw && ky < kh &&
x >= 0 && y >= 0 &&
x < outs.w && y < outs.h) {
//Load kernel into smem
TF tf;
T dedy = hessian ? Sqr(tf.dydx(outs(x,y,out_idx)))*dedx(x, y, out_idx):
tf.dydx(outs(x,y,out_idx))*dedx(x, y, out_idx);
sum_buf[tid] = hessian ? dedy * Sqr(weights(kx, ky,im, out_idx)) : dedy * weights(kx, ky,im, out_idx);
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_dx_prev(ix, iy, im) += vsmem[tid];
}
}
template <class T, class TF>
template <bool hessian>
void CLayer<TensorGPU, T, TF>::BackpropagateKernelProxy(const TensorGPU<T>& input, TensorGPU<T>& de_dx_prev)
{
assert(this->weights().w() * this->weights().h() <= MAX_THREADS);
assert(this->con_map().w() * this->con_map().h() == input.d()*this->weights().d2());
assert(this->de_dx_.HaveSameSize(this->out_));
assert(de_dx_prev.HaveSameSize(input));
const TensorGPU<T>& de_dx_t = hessian ? this->d2e_dx2() : this->de_dx();
dim3 threads(iRoundUpPow2(this->weights().w()*this->weights().h()),1,1);
dim3 blocks(input.w()*input.h(),input.d(), 1);
int nthreads = threads.x;
//Minimum smem size should be at least 64*sizeof(T) to avoid extra checks in the reduction kernel and hence warp divergence
size_t smem_size = ::max(nthreads*sizeof(T), 64*sizeof(T));
for(unsigned out_idx = 0; out_idx < this->out().d(); ++out_idx){
switch(nthreads)
{
case 1 :hipLaunchKernelGGL(( BackpropConvKernel<T, 1 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 2 :hipLaunchKernelGGL(( BackpropConvKernel<T, 2 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 4 :hipLaunchKernelGGL(( BackpropConvKernel<T, 4 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 8 :hipLaunchKernelGGL(( BackpropConvKernel<T, 8 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 16 :hipLaunchKernelGGL(( BackpropConvKernel<T, 16 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 32 :hipLaunchKernelGGL(( BackpropConvKernel<T, 32 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 64 :hipLaunchKernelGGL(( BackpropConvKernel<T, 64 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 128:hipLaunchKernelGGL(( BackpropConvKernel<T, 128 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 256:hipLaunchKernelGGL(( BackpropConvKernel<T, 256 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
default:
throw std::runtime_error("Incorrect threads number in Propagate");
}
cutilCheckMsg("Failed to Backpropagate in CLayerCuda");
}
}
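//Weight gradient: one block per kernel tap (kx,ky) and input map im, handling blockDim.y
//output maps per block for occupancy. dE/dw(kx,ky,im,om) is the sum over output pixels of
//dE/dy(ox,oy,om) * input(ox+kx,oy+ky,im) (with squared terms for the Hessian pass), and the
//bias gradient is the plain sum of dE/dy over the output map.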
template <class T, int nthreads, class TF, bool hessian>
__global__ void ComputeGradientKernel(const TensorDev3<T> dedx, const TensorDev4<T> weights, const TensorDev3<T> outs,
const TensorDev2<int> conn_map, const TensorDev3<T> inps, TensorDev4<T> de_dw,
TensorDev3<T> de_db)
{
T *smem = SharedMemory<T>();
//Use the threads of one block to process several outputs, to increase occupancy
T* sum_buf = smem + threadIdx.y*(nthreads + outs.w*outs.h);
T* dedy_buf = sum_buf + nthreads;
//#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// int im = blockIdx.y;
// int om = blockIdx.z*blockDim.y + threadIdx.y;
//#else
int im = blockIdx.y % conn_map.w;
int om = (blockIdx.y / conn_map.w)*blockDim.y + threadIdx.y;
//#endif
int kx = blockIdx.x % weights.w;
int ky = blockIdx.x / weights.w;
int tid = threadIdx.x;
int out_size = outs.w * outs.h;
//cuassert(im < conn_map.w);
//cuassert(om < conn_map.h);
//Compute dedy and put into smem buffer
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
int ox = (tid + out_idx) % outs.w;
int oy = (tid + out_idx) / outs.w;
TF tf;
T dedy = hessian ? Sqr(tf.dydx(outs(ox,oy,om)))*dedx(ox, oy, om):
tf.dydx(outs(ox,oy,om))*dedx(ox, oy, om);
dedy_buf[tid + out_idx] = dedy;
}
}
__syncthreads();
sum_buf[tid] = 0;
if(conn_map(im, om) != 0) {
//Loop for all outputs
//Prepare dedy * input for reduction
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
int ox = (tid + out_idx) % outs.w;
int oy = (tid + out_idx) / outs.w;
T inp = hessian ? Sqr(inps(ox + kx, oy + ky, im)) : inps(ox + kx, oy + ky, im);
sum_buf[tid] += dedy_buf[tid + out_idx] * inp;
}
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_dw(kx, ky, im, om) = vsmem[tid];
}
}
//Now compute biases gradient
if(im == 0){
sum_buf[tid] = 0;
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
sum_buf[tid] += dedy_buf[tid + out_idx];
}
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_db[om] = vsmem[tid];
}
}
}
template <class T, class TF>
template <bool hessian>
void CLayer<TensorGPU, T, TF>::ComputeGradientKernelProxy(const TensorGPU<T>& input)
{
assert(this->con_map().w() * this->con_map().h() == input.d()*this->weights().d2());
const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw();
const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db();
const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx();
//If the 2D size of de_dx_ is greater than the maximum number of threads, loop inside the kernel
//TODO: CC < 1.2 devices have less than 512 maximum threads. Find a better solution than MAX_THREADS/2
int nthreads_per_out = min(iRoundUpPow2(this->de_dx_.w()*this->de_dx_.h()), MAX_THREADS/2);
//Try to increase occupancy by processing several outputs in one block
int nouts_per_block = max(MAX_OCCUP_THREADS/nthreads_per_out , 1);
//Restrict the number of outputs
nouts_per_block = min(nouts_per_block, this->de_dx_.d());
dim3 threads(nthreads_per_out,nouts_per_block,1);
//Use 3rd dimension if CC>=2.0
//#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// dim3 blocks(this->weights().w()*this->weights().h(),input.d(),this->de_dx_.d()/nouts_per_block);
//#else
dim3 blocks(this->weights().w()*this->weights().h(),input.d()*(this->de_dx_.d()/nouts_per_block), 1);
//#endif
int nthreads = threads.x;
int smem_size = (nthreads + this->out().w()*this->out().h())*nouts_per_block*sizeof(T);
switch(nthreads)
{
case 1 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 1 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 2 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 2 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 4 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 4 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 8 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 8 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 16 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 16 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 32 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 32 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 64 :hipLaunchKernelGGL(( ComputeGradientKernel<T, 64 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 128:hipLaunchKernelGGL(( ComputeGradientKernel<T, 128 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 256:hipLaunchKernelGGL(( ComputeGradientKernel<T, 256 , TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 512:hipLaunchKernelGGL(( ComputeGradientKernel<T, 512, TF, hessian>), dim3(blocks), dim3(threads), smem_size, 0, de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
default:
throw std::runtime_error("Incorrect threads number in ComputeGradientKernelProxy");
}
cutilCheckMsg("Failed to Backpropagate in CLayerCuda");
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::BackPropagate(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev )
{
assert(this->de_dw_.HaveSameSize(this->weights()));
assert(this->de_db_.HaveSameSize(this->biases()));
assert(this->de_dx_.HaveSameSize(this->out()));
dedx_prev.ZeroMemory();
BackpropagateKernelProxy<false>(input, dedx_prev);
ComputeGradient(input);
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::BackPropagateHessian(const TensorGPU<T>& input, TensorGPU<T>& d2edx2_prev )
{
assert(this->d2e_dw2_.HaveSameSize(this->weights()));
assert(this->d2e_db2_.HaveSameSize(this->biases()));
assert(this->d2e_dx2_.HaveSameSize(this->out()));
d2edx2_prev.ZeroMemory();
BackpropagateKernelProxy<true>(input, d2edx2_prev);
ComputeHessian(input);
}
/* Compute gradient without backpropagating errors */
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::ComputeGradient(const TensorGPU<T>& input)
{
ComputeGradientKernelProxy<false>(input);
}
/* Compute Hessian without backpropagating errors */
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::ComputeHessian(const TensorGPU<T>& input)
{
ComputeGradientKernelProxy<true>(input);
this->num_hessian_accums_++;
}
#endif //HAVE_CUDA
} | 32d4281042f66607bbc2bfc4e66554600fb886a8.cu | //Copyright (c) 2012, Mikhail Sirotenko <[email protected]>
//All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
//ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
//WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
//DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
//(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
//ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
//(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
//SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../precomp.hpp"
namespace cudacnn
{
template class CLayer<TensorGPU, float, TansigMod<float> >;
template class CLayer<TensorGPU, float, Tansig<float> >;
template class CLayer<TensorGPU, float, Purelin<float> >;
template class CLayer<TensorGPU, double, TansigMod<double> >;
template class CLayer<TensorGPU, double, Tansig<double> >;
template class CLayer<TensorGPU, double, Purelin<double> >;
#ifdef HAVE_CUDA
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::AverageHessian()
{
if(this->num_hessian_accums_) {
dim3 threads(min(512,this->d2e_dw2_.num_elements()),1,1);
dim3 blocks(iDivUp(this->d2e_dw2_.num_elements(),512),1,1);
Average<T><<<blocks, threads>>>(this->d2e_dw2_, this->num_hessian_accums_);
threads = dim3(min(512,this->d2e_db2_.num_elements()),1,1);
blocks = dim3(iDivUp(this->d2e_db2_.num_elements(),512),1,1);
Average<T><<<blocks, threads>>>(this->d2e_db2_, this->num_hessian_accums_);
this->num_hessian_accums_ = 0;
}
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::AdaptWeights(T tau, bool use_hessian, T mu)
{
dim3 threads(MAX_THREADS);
if(use_hessian){
dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS));
AdaptWeightsKernel<T><<<threads,blocks>>>(this->weights(), tau, mu, this->de_dw(), this->d2e_dw2());
blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS));
AdaptWeightsKernel<T><<<threads,blocks>>>(this->biases(), tau, mu, this->de_db(), this->d2e_db2());
}else{
dim3 blocks(iDivUp(this->weights().num_elements(),MAX_THREADS));
AdaptWeightsKernel<T><<<threads,blocks>>>(this->weights(), tau, this->de_dw());
blocks = dim3(iDivUp(this->biases().num_elements(),MAX_THREADS));
AdaptWeightsKernel<T><<<threads,blocks>>>(this->biases(), tau, this->de_db());
}
}
//Simple variant. Without extra threads for maximum occupancy
template <class T, class TF, int nthreads>
__global__ void Conv2ValidKernel(const TensorDev3<T> inputs, const TensorDev4<T> kernels, const TensorDev3<T> biases,
const TensorDev2<int> conn_map, TensorDev3<T> outputs)
{
//__shared__ T smem[nthreads*2];
T* smem = SharedMemory<T>();
T* kernels_buf = smem;
T* sum_buf = smem + nthreads;
int kx = threadIdx.x;
int ky = threadIdx.y;
//int km = threadIdx.z;
//output coords
int km = blockIdx.y;
int y = blockIdx.x / outputs.w;
int x = blockIdx.x % outputs.w;
//int tid = threadIdx.z*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
int tid = threadIdx.y*blockDim.x + threadIdx.x;
kernels_buf[tid] = 0;
sum_buf[tid] = 0;
T out = 0;
if(kx < kernels.w && ky < kernels.h) {
//Loop for all inputs
for(int i = 0; i < inputs.d; ++i) {
//Load kernel into smem
kernels_buf[tid] = kernels(kx,ky,i,km);
__syncthreads();
sum_buf[tid] = kernels_buf[tid] * inputs(x + kx, y + ky, i);
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
//Check connection
if(tid == 0){
out += conn_map(i, km) > 0 ? vsmem[tid] : 0;
}
}
}
if(tid == 0){
TF tf;
outputs(x, y, km) = tf(out + biases[km]);
}
}
//TODO: remove the limitation of a 32x32 maximum kernel size
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::Propagate(const TensorGPU<T>& layer_input )
{
//TODO: parametrize max threads number
assert(this->weights().w() * this->weights().h() <= MAX_THREADS);
assert(this->con_map().w() == layer_input.d());
assert(this->con_map().h() == this->out().d());
assert(this->weights().d() == this->con_map().w());
assert(this->weights().d2() == this->con_map().h());
dim3 threads(iRoundUpPow2(this->weights().w()),iRoundUpPow2(this->weights().h()),1);
dim3 blocks(this->out().w()*this->out().h(),this->out().d(), 1);
int nthreads = threads.x*threads.y;
size_t smem_size = std::max(nthreads*2*sizeof(T), 64*sizeof(T));
switch(nthreads)
{
case 1 : Conv2ValidKernel<T, TF, 1 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 2 : Conv2ValidKernel<T, TF, 2 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 4 : Conv2ValidKernel<T, TF, 4 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 8 : Conv2ValidKernel<T, TF, 8 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 16 : Conv2ValidKernel<T, TF, 16 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 32 : Conv2ValidKernel<T, TF, 32 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 64 : Conv2ValidKernel<T, TF, 64 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 128: Conv2ValidKernel<T, TF, 128 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
case 256: Conv2ValidKernel<T, TF, 256 ><<<blocks, threads, smem_size>>>(layer_input, this->weights(), this->biases(),
this->con_map(), this->out_); break;
default:
throw std::runtime_error("Incorrect threads number in Propagate");
}
cutilCheckMsg("Failed to Propagate in CLayerCuda");
}
template <class T, int nthreads, class TF, bool hessian>
__global__ void BackpropConvKernel(const TensorDev3<T> dedx, const TensorDev4<T> weights, const TensorDev3<T> outs,
const TensorDev2<int> conn_map, unsigned out_idx, TensorDev3<T> de_dx_prev)
{
T* sum_buf = SharedMemory<T>();
int kx = threadIdx.x % weights.w;
int ky = threadIdx.x / weights.w;
//coords in de_dx_prev, i.e. the layer input, which is larger than the layer output
int ix = blockIdx.x % de_dx_prev.w;
int iy = blockIdx.x / de_dx_prev.w;
int im = blockIdx.y;
int kw = weights.w;
int kh = weights.h;
int y = iy - ky;
int x = ix - kx;
if(conn_map(im, out_idx) == 0) return;
int tid = threadIdx.x;
sum_buf[tid] = 0;
__syncthreads();
if(kx < kw && ky < kh &&
x >= 0 && y >= 0 &&
x < outs.w && y < outs.h) {
//Load kernel into smem
TF tf;
T dedy = hessian ? Sqr(tf.dydx(outs(x,y,out_idx)))*dedx(x, y, out_idx):
tf.dydx(outs(x,y,out_idx))*dedx(x, y, out_idx);
sum_buf[tid] = hessian ? dedy * Sqr(weights(kx, ky,im, out_idx)) : dedy * weights(kx, ky,im, out_idx);
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_dx_prev(ix, iy, im) += vsmem[tid];
}
}
template <class T, class TF>
template <bool hessian>
void CLayer<TensorGPU, T, TF>::BackpropagateKernelProxy(const TensorGPU<T>& input, TensorGPU<T>& de_dx_prev)
{
assert(this->weights().w() * this->weights().h() <= MAX_THREADS);
assert(this->con_map().w() * this->con_map().h() == input.d()*this->weights().d2());
assert(this->de_dx_.HaveSameSize(this->out_));
assert(de_dx_prev.HaveSameSize(input));
const TensorGPU<T>& de_dx_t = hessian ? this->d2e_dx2() : this->de_dx();
dim3 threads(iRoundUpPow2(this->weights().w()*this->weights().h()),1,1);
dim3 blocks(input.w()*input.h(),input.d(), 1);
int nthreads = threads.x;
//Minimum smem size should be at least 64*sizeof(T) to avoid extra checks in the reduction kernel and hence warp divergence
size_t smem_size = std::max(nthreads*sizeof(T), 64*sizeof(T));
for(unsigned out_idx = 0; out_idx < this->out().d(); ++out_idx){
switch(nthreads)
{
case 1 : BackpropConvKernel<T, 1 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 2 : BackpropConvKernel<T, 2 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 4 : BackpropConvKernel<T, 4 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 8 : BackpropConvKernel<T, 8 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 16 : BackpropConvKernel<T, 16 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 32 : BackpropConvKernel<T, 32 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 64 : BackpropConvKernel<T, 64 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 128: BackpropConvKernel<T, 128 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
case 256: BackpropConvKernel<T, 256 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_t, this->weights(), this->out(), this->con_map(), out_idx, de_dx_prev); break;
default:
throw std::runtime_error("Incorrect threads number in Propagate");
}
cutilCheckMsg("Failed to Backpropagate in CLayerCuda");
}
}
template <class T, int nthreads, class TF, bool hessian>
__global__ void ComputeGradientKernel(const TensorDev3<T> dedx, const TensorDev4<T> weights, const TensorDev3<T> outs,
const TensorDev2<int> conn_map, const TensorDev3<T> inps, TensorDev4<T> de_dw,
TensorDev3<T> de_db)
{
T *smem = SharedMemory<T>();
//Use the threads of one block to process several outputs, to increase occupancy
T* sum_buf = smem + threadIdx.y*(nthreads + outs.w*outs.h);
T* dedy_buf = sum_buf + nthreads;
//#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// int im = blockIdx.y;
// int om = blockIdx.z*blockDim.y + threadIdx.y;
//#else
int im = blockIdx.y % conn_map.w;
int om = (blockIdx.y / conn_map.w)*blockDim.y + threadIdx.y;
//#endif
int kx = blockIdx.x % weights.w;
int ky = blockIdx.x / weights.w;
int tid = threadIdx.x;
int out_size = outs.w * outs.h;
//cuassert(im < conn_map.w);
//cuassert(om < conn_map.h);
//Compute dedy and put into smem buffer
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
int ox = (tid + out_idx) % outs.w;
int oy = (tid + out_idx) / outs.w;
TF tf;
T dedy = hessian ? Sqr(tf.dydx(outs(ox,oy,om)))*dedx(ox, oy, om):
tf.dydx(outs(ox,oy,om))*dedx(ox, oy, om);
dedy_buf[tid + out_idx] = dedy;
}
}
__syncthreads();
sum_buf[tid] = 0;
if(conn_map(im, om) != 0) {
//Loop for all outputs
//Prepare dedy * input for reduction
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
int ox = (tid + out_idx) % outs.w;
int oy = (tid + out_idx) / outs.w;
T inp = hessian ? Sqr(inps(ox + kx, oy + ky, im)) : inps(ox + kx, oy + ky, im);
sum_buf[tid] += dedy_buf[tid + out_idx] * inp;
}
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_dw(kx, ky, im, om) = vsmem[tid];
}
}
//Now compute biases gradient
if(im == 0){
sum_buf[tid] = 0;
for(int out_idx = 0; out_idx < out_size; out_idx += nthreads){
if(tid + out_idx < out_size){
sum_buf[tid] += dedy_buf[tid + out_idx];
}
}
__syncthreads();
volatile T* vsmem = sum_buf;
SmemReduce<T, nthreads>(vsmem, tid);
__syncthreads();
if(tid == 0){
de_db[om] = vsmem[tid];
}
}
}
template <class T, class TF>
template <bool hessian>
void CLayer<TensorGPU, T, TF>::ComputeGradientKernelProxy(const TensorGPU<T>& input)
{
assert(this->con_map().w() * this->con_map().h() == input.d()*this->weights().d2());
const TensorGPU<T>& de_dw_in = hessian ? this->d2e_dw2() : this->de_dw();
const TensorGPU<T>& de_db_in = hessian ? this->d2e_db2() : this->de_db();
const TensorGPU<T>& de_dx_in = hessian ? this->d2e_dx2() : this->de_dx();
//If the 2D size of de_dx_ is greater than the maximum number of threads, loop inside the kernel
//TODO: CC < 1.2 devices have less than 512 maximum threads. Find a better solution than MAX_THREADS/2
int nthreads_per_out = min(iRoundUpPow2(this->de_dx_.w()*this->de_dx_.h()), MAX_THREADS/2);
//Try to increase occupancy by processing several outputs in one block
int nouts_per_block = max(MAX_OCCUP_THREADS/nthreads_per_out , 1);
//Restrict the number of outputs
nouts_per_block = min(nouts_per_block, this->de_dx_.d());
dim3 threads(nthreads_per_out,nouts_per_block,1);
//Use 3rd dimension if CC>=2.0
//#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 200
// dim3 blocks(this->weights().w()*this->weights().h(),input.d(),this->de_dx_.d()/nouts_per_block);
//#else
dim3 blocks(this->weights().w()*this->weights().h(),input.d()*(this->de_dx_.d()/nouts_per_block), 1);
//#endif
int nthreads = threads.x;
int smem_size = (nthreads + this->out().w()*this->out().h())*nouts_per_block*sizeof(T);
switch(nthreads)
{
case 1 : ComputeGradientKernel<T, 1 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 2 : ComputeGradientKernel<T, 2 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 4 : ComputeGradientKernel<T, 4 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 8 : ComputeGradientKernel<T, 8 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 16 : ComputeGradientKernel<T, 16 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 32 : ComputeGradientKernel<T, 32 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 64 : ComputeGradientKernel<T, 64 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 128: ComputeGradientKernel<T, 128 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 256: ComputeGradientKernel<T, 256 , TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
case 512: ComputeGradientKernel<T, 512, TF, hessian><<<blocks, threads, smem_size>>>(de_dx_in, this->weights(), this->out(), this->con_map(),
input, de_dw_in, de_db_in); break;
default:
throw std::runtime_error("Incorrect threads number in ComputeGradientKernelProxy");
}
cutilCheckMsg("Failed to Backpropagate in CLayerCuda");
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::BackPropagate(const TensorGPU<T>& input, TensorGPU<T>& dedx_prev )
{
assert(this->de_dw_.HaveSameSize(this->weights()));
assert(this->de_db_.HaveSameSize(this->biases()));
assert(this->de_dx_.HaveSameSize(this->out()));
dedx_prev.ZeroMemory();
BackpropagateKernelProxy<false>(input, dedx_prev);
ComputeGradient(input);
}
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::BackPropagateHessian(const TensorGPU<T>& input, TensorGPU<T>& d2edx2_prev )
{
assert(this->d2e_dw2_.HaveSameSize(this->weights()));
assert(this->d2e_db2_.HaveSameSize(this->biases()));
assert(this->d2e_dx2_.HaveSameSize(this->out()));
d2edx2_prev.ZeroMemory();
BackpropagateKernelProxy<true>(input, d2edx2_prev);
ComputeHessian(input);
}
/* Compute gradient without backpropagating errors */
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::ComputeGradient(const TensorGPU<T>& input)
{
ComputeGradientKernelProxy<false>(input);
}
/* Compute Hessian without backpropagating errors */
template <class T, class TF>
void CLayer<TensorGPU, T, TF>::ComputeHessian(const TensorGPU<T>& input)
{
ComputeGradientKernelProxy<true>(input);
this->num_hessian_accums_++;
}
#endif //HAVE_CUDA
} |
7fee024794cee80f36a87589cda42da478385b37.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef HAVE_CUDA
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/sort.h>
#endif
#include "DataMgr/Allocators/ThrustAllocator.h"
#include "InPlaceSortImpl.h"
#ifdef HAVE_CUDA
#include <hip/hip_runtime.h>
hipStream_t getQueryEngineCudaStreamForDevice(int device_num);
#include "Logger/Logger.h"
#define checkCudaErrors(err) CHECK_EQ(err, hipSuccess)
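// sort_on_gpu computes an argsort: idx_buff is initialized to 0..entry_count-1 and reordered
// together with the keys, so afterwards it holds the sorting permutation.
// apply_permutation_on_gpu then applies that permutation to another payload column via a
// thrust::gather through a temporary buffer.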
template <typename T>
void sort_on_gpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
ThrustAllocator& alloc,
const int device_id) {
thrust::device_ptr<T> key_ptr(val_buff);
thrust::device_ptr<int32_t> idx_ptr(idx_buff);
thrust::sequence(idx_ptr, idx_ptr + entry_count);
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
if (desc) {
thrust::sort_by_key(thrust::hip::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
idx_ptr,
thrust::greater<T>());
} else {
thrust::sort_by_key(thrust::hip::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
idx_ptr);
}
checkCudaErrors(hipStreamSynchronize(qe_cuda_stream));
}
template <typename T>
void apply_permutation_on_gpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
ThrustAllocator& alloc,
const int device_id) {
thrust::device_ptr<T> key_ptr(val_buff);
thrust::device_ptr<int32_t> idx_ptr(idx_buff);
const size_t buf_size = entry_count * sizeof(T);
T* raw_ptr = reinterpret_cast<T*>(alloc.allocate(buf_size));
thrust::device_ptr<T> tmp_ptr(raw_ptr);
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
thrust::copy(thrust::hip::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
tmp_ptr);
checkCudaErrors(hipStreamSynchronize(qe_cuda_stream));
thrust::gather(thrust::hip::par(alloc).on(qe_cuda_stream),
idx_ptr,
idx_ptr + entry_count,
tmp_ptr,
key_ptr);
checkCudaErrors(hipStreamSynchronize(qe_cuda_stream));
alloc.deallocate(reinterpret_cast<int8_t*>(raw_ptr), buf_size);
}
template <typename T>
void sort_on_cpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc) {
thrust::sequence(idx_buff, idx_buff + entry_count);
if (desc) {
thrust::sort_by_key(val_buff, val_buff + entry_count, idx_buff, thrust::greater<T>());
} else {
thrust::sort_by_key(val_buff, val_buff + entry_count, idx_buff);
}
}
template <typename T>
void apply_permutation_on_cpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
T* tmp_buff) {
thrust::copy(val_buff, val_buff + entry_count, tmp_buff);
thrust::gather(idx_buff, idx_buff + entry_count, tmp_buff, val_buff);
}
#endif
void sort_on_gpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
const uint32_t chosen_bytes,
ThrustAllocator& alloc,
const int device_id) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
sort_on_gpu(reinterpret_cast<int8_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 2:
sort_on_gpu(reinterpret_cast<int16_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 4:
sort_on_gpu(reinterpret_cast<int32_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 8:
sort_on_gpu(val_buff, idx_buff, entry_count, desc, alloc, device_id);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void sort_on_cpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
const uint32_t chosen_bytes) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
sort_on_cpu(reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 2:
sort_on_cpu(reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 4:
sort_on_cpu(reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 8:
sort_on_cpu(val_buff, idx_buff, entry_count, desc);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void apply_permutation_on_gpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const uint32_t chosen_bytes,
ThrustAllocator& alloc,
const int device_id) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
apply_permutation_on_gpu(
reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 2:
apply_permutation_on_gpu(
reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 4:
apply_permutation_on_gpu(
reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 8:
apply_permutation_on_gpu(val_buff, idx_buff, entry_count, alloc, device_id);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void apply_permutation_on_cpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
int64_t* tmp_buff,
const uint32_t chosen_bytes) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
apply_permutation_on_cpu(reinterpret_cast<int8_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int8_t*>(tmp_buff));
break;
case 2:
apply_permutation_on_cpu(reinterpret_cast<int16_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int16_t*>(tmp_buff));
break;
case 4:
apply_permutation_on_cpu(reinterpret_cast<int32_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int32_t*>(tmp_buff));
break;
case 8:
apply_permutation_on_cpu(val_buff, idx_buff, entry_count, tmp_buff);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
| 7fee024794cee80f36a87589cda42da478385b37.cu | #ifdef HAVE_CUDA
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/sort.h>
#endif
#include "DataMgr/Allocators/ThrustAllocator.h"
#include "InPlaceSortImpl.h"
#ifdef HAVE_CUDA
#include <cuda.h>
CUstream getQueryEngineCudaStreamForDevice(int device_num);
#include "Logger/Logger.h"
#define checkCudaErrors(err) CHECK_EQ(err, CUDA_SUCCESS)
template <typename T>
void sort_on_gpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
ThrustAllocator& alloc,
const int device_id) {
thrust::device_ptr<T> key_ptr(val_buff);
thrust::device_ptr<int32_t> idx_ptr(idx_buff);
thrust::sequence(idx_ptr, idx_ptr + entry_count);
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
if (desc) {
thrust::sort_by_key(thrust::cuda::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
idx_ptr,
thrust::greater<T>());
} else {
thrust::sort_by_key(thrust::cuda::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
idx_ptr);
}
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
}
template <typename T>
void apply_permutation_on_gpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
ThrustAllocator& alloc,
const int device_id) {
thrust::device_ptr<T> key_ptr(val_buff);
thrust::device_ptr<int32_t> idx_ptr(idx_buff);
const size_t buf_size = entry_count * sizeof(T);
T* raw_ptr = reinterpret_cast<T*>(alloc.allocate(buf_size));
thrust::device_ptr<T> tmp_ptr(raw_ptr);
auto qe_cuda_stream = getQueryEngineCudaStreamForDevice(device_id);
thrust::copy(thrust::cuda::par(alloc).on(qe_cuda_stream),
key_ptr,
key_ptr + entry_count,
tmp_ptr);
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
thrust::gather(thrust::cuda::par(alloc).on(qe_cuda_stream),
idx_ptr,
idx_ptr + entry_count,
tmp_ptr,
key_ptr);
checkCudaErrors(cuStreamSynchronize(qe_cuda_stream));
alloc.deallocate(reinterpret_cast<int8_t*>(raw_ptr), buf_size);
}
template <typename T>
void sort_on_cpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc) {
thrust::sequence(idx_buff, idx_buff + entry_count);
if (desc) {
thrust::sort_by_key(val_buff, val_buff + entry_count, idx_buff, thrust::greater<T>());
} else {
thrust::sort_by_key(val_buff, val_buff + entry_count, idx_buff);
}
}
template <typename T>
void apply_permutation_on_cpu(T* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
T* tmp_buff) {
thrust::copy(val_buff, val_buff + entry_count, tmp_buff);
thrust::gather(idx_buff, idx_buff + entry_count, tmp_buff, val_buff);
}
#endif
void sort_on_gpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
const uint32_t chosen_bytes,
ThrustAllocator& alloc,
const int device_id) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
sort_on_gpu(reinterpret_cast<int8_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 2:
sort_on_gpu(reinterpret_cast<int16_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 4:
sort_on_gpu(reinterpret_cast<int32_t*>(val_buff),
idx_buff,
entry_count,
desc,
alloc,
device_id);
break;
case 8:
sort_on_gpu(val_buff, idx_buff, entry_count, desc, alloc, device_id);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void sort_on_cpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const bool desc,
const uint32_t chosen_bytes) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
sort_on_cpu(reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 2:
sort_on_cpu(reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 4:
sort_on_cpu(reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, desc);
break;
case 8:
sort_on_cpu(val_buff, idx_buff, entry_count, desc);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void apply_permutation_on_gpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
const uint32_t chosen_bytes,
ThrustAllocator& alloc,
const int device_id) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
apply_permutation_on_gpu(
reinterpret_cast<int8_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 2:
apply_permutation_on_gpu(
reinterpret_cast<int16_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 4:
apply_permutation_on_gpu(
reinterpret_cast<int32_t*>(val_buff), idx_buff, entry_count, alloc, device_id);
break;
case 8:
apply_permutation_on_gpu(val_buff, idx_buff, entry_count, alloc, device_id);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
void apply_permutation_on_cpu(int64_t* val_buff,
int32_t* idx_buff,
const uint64_t entry_count,
int64_t* tmp_buff,
const uint32_t chosen_bytes) {
#ifdef HAVE_CUDA
switch (chosen_bytes) {
case 1:
apply_permutation_on_cpu(reinterpret_cast<int8_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int8_t*>(tmp_buff));
break;
case 2:
apply_permutation_on_cpu(reinterpret_cast<int16_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int16_t*>(tmp_buff));
break;
case 4:
apply_permutation_on_cpu(reinterpret_cast<int32_t*>(val_buff),
idx_buff,
entry_count,
reinterpret_cast<int32_t*>(tmp_buff));
break;
case 8:
apply_permutation_on_cpu(val_buff, idx_buff, entry_count, tmp_buff);
break;
default:
      // FIXME(miyu): the CUDA linker doesn't accept assertions on the GPU yet.
break;
}
#endif
}
|
7752a945e76b656c65ecef437a4355977184c498.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// counting Hamilton cycle, CUDA acceleration
#include<stdio.h>
#include<stdlib.h>
#define MAX_BLOCK_SIZE 256
#define MAX_ARRAY_SIZE (1024*8)
typedef unsigned long long u64;
// any 2 <= mod <= 2^31 should work
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
unsigned c = a+b;
return c >= mod ? c-mod : c;
}
__host__ __device__ u64 mod_sum64(u64 a, u64 b, u64 mod) {
u64 c = a+b;
return c >= mod ? c-mod : c;
}
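// As called from main below, mod is 0; the unsigned sum then always satisfies c >= mod, so
// both helpers reduce to plain wrap-around addition (arithmetic modulo 2^32 / 2^64).
// ha2: for each assigned vertex subset s (drawn from vertices 0..n-2), the k threads of a
// row (one thread per vertex of s) run a transfer-matrix DP in qc[] that counts closed walks
// with n edges through vertex n-1 whose intermediate vertices all lie in s; main() then
// combines the per-subset-size totals with alternating signs (inclusion-exclusion over |s|).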
template<int k>
__launch_bounds__(MAX_BLOCK_SIZE)
__global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
__shared__ unsigned long long qc[1024]; // transition count
__shared__ unsigned long long ai[64]; // adjacency matrix as bitset
//const int k = blockDim.x;
const int tid = threadIdx.x;
const int bid = threadIdx.y + blockIdx.x * blockDim.y;
const int sha = threadIdx.y * k;
const int gridSize = blockDim.y * gridDim.x;
unsigned long long s = part[bid];
unsigned long long mask = (1ull<<k) - 1;
unsigned long long total = 0;
// fetch adjacency matrix
for (int i = tid+sha; i < n; i += blockDim.y * k) {
unsigned long long aa = 0;
for (int j = 0; j < n; j++) {
aa = aa | static_cast<unsigned long long>(adj[i * n + j]) << j;
}
ai[i] = aa;
}
__syncthreads();
for (int runs = 0; runs < work; runs += gridSize) {
unsigned at;
{
unsigned long long row = s;
for (int i = 0; i < tid; i++) {
row = row & (row-1);
}
at = __ffsll(row)-1;
}
		// making row "long long" would make the program about 3x slower, so two unsigned ints are used instead
unsigned row = 0, row2 = 0;
{
// build transition table
unsigned long long me = ai[at];
for (int i = n-2; i >= 0; i--) {
if (s>>i & 1) {
row2 = row2 << 1 | row >> 31;
row = row + row + (me>>i & 1);
}
}
// initial state
qc[tid+sha] = (me >> (n-1)) & 1;
__syncthreads();
}
// calculate each transition, uses GPU SIMD feature
for (int t = 1; t < n-1; t++) {
unsigned long long sum = 0;
unsigned rr = row;
for (int i = 0; i < min(k, 32); i++) {
//sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * (rr & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * dd[i], mod);
sum = mod_sum64(sum, qc[i+sha] & 0LL-(rr & 1), mod);
rr >>= 1;
}
if (k > 32) {
rr = row2;
for (int i = 0; i < k-32; i++) {
sum = mod_sum64(sum, qc[i+32+sha] & 0ULL-(rr & 1), mod);
rr >>= 1;
}
}
__syncthreads();
qc[tid+sha] = sum;
__syncthreads();
}
// last transition
{
if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0;
__syncthreads();
unsigned long long count = 0;
for (int i = 0; i < k; i++) {
count = mod_sum64(count, qc[i+sha], mod);
}
//if (tid==0) printf("[%d:%d],", s, count);
if (runs + bid < work) {
total = mod_sum64(count, total, mod);
}
}
// get next work
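		// (Gosper's-hack style step: clear the lowest run of ones, carry one position up, then
		// refill the low bits, giving the numerically next subset with the same popcount)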
unsigned bit = s & (-s);
s += bit;
s |= mask >> __popcll(s);
__syncthreads();
}
if (tid == 0) {
// output total for this block
ret[bid] = total;
}
}
int n;
int adj[64*64];
unsigned part[MAX_ARRAY_SIZE];
unsigned long long ret[MAX_ARRAY_SIZE];
long long nCr[65][65];
u64 getComb(long long idx, int n, int r) {
u64 ans = 0;
n -= 1;
while (r > 0) {
if (idx < nCr[n][r]) n -= 1;
else {
ans |= u64(1)<<(n);
idx -= nCr[n][r];
n -= 1;
r -= 1;
}
}
return ans;
}
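// getComb unranks idx into the idx-th r-element subset of {0,...,n-1}, with subsets ordered by
// their value as bitmasks (combinatorial number system); getComb(0,n,r) is the lowest r bits,
// which matches the popcount-preserving stepping done inside the kernel.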
void ha4(int gridSize, int blockSize, int k, int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
dim3 bsz(k, blockSize);
switch (k) {
#define HA4_k(k) case k:hipLaunchKernelGGL(( ha2<k>), dim3(gridSize), dim3(bsz), 0, 0, n, work, part, adj, ret, mod); break;
HA4_k(2)
HA4_k(3)
HA4_k(4)
HA4_k(5)
HA4_k(6)HA4_k(7)HA4_k(8)HA4_k(9)HA4_k(10)
HA4_k(11)HA4_k(12)HA4_k(13)HA4_k(14)HA4_k(15)
HA4_k(16)HA4_k(17)HA4_k(18)HA4_k(19)HA4_k(20)
HA4_k(21)HA4_k(22)HA4_k(23)HA4_k(24)HA4_k(25)
HA4_k(26)HA4_k(27)HA4_k(28)HA4_k(29)HA4_k(30)
HA4_k(31)HA4_k(32)
#undef HA4_k
}
hipError_t status = hipGetLastError();
if (status != hipSuccess) {
fprintf(stderr, "%s\n", hipGetErrorString(status));
}
}
int main() {
int *gpu_adj;
unsigned *gpu_part;
unsigned long long *gpu_ret;
scanf("%d", &n);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i != j) adj[i*n+j] = rand()>>5&1;
}
}
for (int i = 0; i < n; i++) {
char op;
for (int j = 0; j < n; j++) {
if (scanf(" %c", &op) == 1 && i != j) {
adj[i*n+j] = op == '1';
}
}
}
for (int i = 0; i <= 64; i++) {
nCr[i][0] = nCr[i][i] = 1;
for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j];
}
hipMalloc(&gpu_part, sizeof part);
hipMalloc(&gpu_adj, sizeof adj);
hipMalloc(&gpu_ret, sizeof ret);
hipMemcpy(gpu_adj, adj, sizeof adj, hipMemcpyHostToDevice);
unsigned long long ans = 0;
unsigned long long mod = 0;
for (int k = 1; k <= n-1; k++) {
int wo = nCr[n-1][k];
int blockSize = wo;
if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k;
int gridSize = wo / blockSize;
if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize;
int totSize = blockSize * gridSize;
fprintf(stderr, "block size = (%d,%d,1) grid size = (%d,1,1)\n", k, blockSize, gridSize);
//for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k));
for (int j = 0; j < totSize; j++) {
int step = wo / totSize * j;
if (j < wo % totSize) step += j;
else step += wo % totSize;
//printf("step=%d\n", step);
part[j] = getComb(step, n-1, k);
}
hipMemcpy(gpu_part, part, sizeof(int) * totSize, hipMemcpyHostToDevice);
ha4(gridSize, blockSize, k, n, wo, gpu_part, gpu_adj, gpu_ret, mod);
hipDeviceSynchronize();
hipMemcpy(ret, gpu_ret, sizeof(long long) * totSize, hipMemcpyDeviceToHost);
unsigned long long sum = 0;
for (int j = 0; j < totSize; j++) {
sum = mod_sum64(sum, ret[j], 0);
}
//printf("sum = %u\n", sum);
if ((n-k)%2 == 1) ans = mod_sum64(ans, sum, mod);
else if (sum != 0) ans = mod_sum64(ans, mod-sum, mod);
}
printf("ans = %llu\n", ans);
hipFree(gpu_ret);
hipFree(gpu_adj);
hipFree(gpu_part);
return 0;
}
| 7752a945e76b656c65ecef437a4355977184c498.cu | // counting Hamilton cycle, CUDA acceleration
#include<stdio.h>
#include<stdlib.h>
#define MAX_BLOCK_SIZE 256
#define MAX_ARRAY_SIZE (1024*8)
typedef unsigned long long u64;
// any 2 <= mod <= 2^31 should work
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
unsigned c = a+b;
return c >= mod ? c-mod : c;
}
__host__ __device__ u64 mod_sum64(u64 a, u64 b, u64 mod) {
u64 c = a+b;
return c >= mod ? c-mod : c;
}
template<int k>
__launch_bounds__(MAX_BLOCK_SIZE)
__global__ void ha2(int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
__shared__ unsigned long long qc[1024]; // transition count
__shared__ unsigned long long ai[64]; // adjacency matrix as bitset
//const int k = blockDim.x;
const int tid = threadIdx.x;
const int bid = threadIdx.y + blockIdx.x * blockDim.y;
const int sha = threadIdx.y * k;
const int gridSize = blockDim.y * gridDim.x;
unsigned long long s = part[bid];
unsigned long long mask = (1ull<<k) - 1;
unsigned long long total = 0;
// fetch adjacency matrix
for (int i = tid+sha; i < n; i += blockDim.y * k) {
unsigned long long aa = 0;
for (int j = 0; j < n; j++) {
aa = aa | static_cast<unsigned long long>(adj[i * n + j]) << j;
}
ai[i] = aa;
}
__syncthreads();
for (int runs = 0; runs < work; runs += gridSize) {
unsigned at;
{
unsigned long long row = s;
for (int i = 0; i < tid; i++) {
row = row & (row-1);
}
at = __ffsll(row)-1;
}
		// making row "long long" would make the program about 3x slower, so two unsigned ints are used instead
unsigned row = 0, row2 = 0;
{
// build transition table
unsigned long long me = ai[at];
for (int i = n-2; i >= 0; i--) {
if (s>>i & 1) {
row2 = row2 << 1 | row >> 31;
row = row + row + (me>>i & 1);
}
}
// initial state
qc[tid+sha] = (me >> (n-1)) & 1;
__syncthreads();
}
// calculate each transition, uses GPU SIMD feature
for (int t = 1; t < n-1; t++) {
unsigned long long sum = 0;
unsigned rr = row;
for (int i = 0; i < min(k, 32); i++) {
//sum = mod_sum(sum, qc[i+sha] * (row>>i & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * (rr & 1), mod);
//sum = mod_sum64(sum, qc[i+sha] * dd[i], mod);
sum = mod_sum64(sum, qc[i+sha] & 0LL-(rr & 1), mod);
rr >>= 1;
}
if (k > 32) {
rr = row2;
for (int i = 0; i < k-32; i++) {
sum = mod_sum64(sum, qc[i+32+sha] & 0ULL-(rr & 1), mod);
rr >>= 1;
}
}
__syncthreads();
qc[tid+sha] = sum;
__syncthreads();
}
// last transition
{
if (!(ai[n-1] >> at & 1)) qc[tid+sha] = 0;
__syncthreads();
unsigned long long count = 0;
for (int i = 0; i < k; i++) {
count = mod_sum64(count, qc[i+sha], mod);
}
//if (tid==0) printf("[%d:%d],", s, count);
if (runs + bid < work) {
total = mod_sum64(count, total, mod);
}
}
// get next work
unsigned bit = s & (-s);
s += bit;
s |= mask >> __popcll(s);
__syncthreads();
}
if (tid == 0) {
// output total for this block
ret[bid] = total;
}
}
int n;
int adj[64*64];
unsigned part[MAX_ARRAY_SIZE];
unsigned long long ret[MAX_ARRAY_SIZE];
long long nCr[65][65];
u64 getComb(long long idx, int n, int r) {
u64 ans = 0;
n -= 1;
while (r > 0) {
if (idx < nCr[n][r]) n -= 1;
else {
ans |= u64(1)<<(n);
idx -= nCr[n][r];
n -= 1;
r -= 1;
}
}
return ans;
}
void ha4(int gridSize, int blockSize, int k, int n, int work, unsigned *part, int *adj, unsigned long long *ret, unsigned long long mod) {
dim3 bsz(k, blockSize);
switch (k) {
#define HA4_k(k) case k: ha2<k><<<gridSize, bsz>>>(n, work, part, adj, ret, mod); break;
HA4_k(2)
HA4_k(3)
HA4_k(4)
HA4_k(5)
HA4_k(6)HA4_k(7)HA4_k(8)HA4_k(9)HA4_k(10)
HA4_k(11)HA4_k(12)HA4_k(13)HA4_k(14)HA4_k(15)
HA4_k(16)HA4_k(17)HA4_k(18)HA4_k(19)HA4_k(20)
HA4_k(21)HA4_k(22)HA4_k(23)HA4_k(24)HA4_k(25)
HA4_k(26)HA4_k(27)HA4_k(28)HA4_k(29)HA4_k(30)
HA4_k(31)HA4_k(32)
#undef HA4_k
}
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess) {
fprintf(stderr, "%s\n", cudaGetErrorString(status));
}
}
int main() {
int *gpu_adj;
unsigned *gpu_part;
unsigned long long *gpu_ret;
scanf("%d", &n);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i != j) adj[i*n+j] = rand()>>5&1;
}
}
for (int i = 0; i < n; i++) {
char op;
for (int j = 0; j < n; j++) {
if (scanf(" %c", &op) == 1 && i != j) {
adj[i*n+j] = op == '1';
}
}
}
for (int i = 0; i <= 64; i++) {
nCr[i][0] = nCr[i][i] = 1;
for (int j = 1; j < i; j++) nCr[i][j] = nCr[i-1][j-1] + nCr[i-1][j];
}
cudaMalloc(&gpu_part, sizeof part);
cudaMalloc(&gpu_adj, sizeof adj);
cudaMalloc(&gpu_ret, sizeof ret);
cudaMemcpy(gpu_adj, adj, sizeof adj, cudaMemcpyHostToDevice);
unsigned long long ans = 0;
unsigned long long mod = 0;
for (int k = 1; k <= n-1; k++) {
int wo = nCr[n-1][k];
int blockSize = wo;
if (blockSize > MAX_BLOCK_SIZE / k) blockSize = MAX_BLOCK_SIZE / k;
int gridSize = wo / blockSize;
if (blockSize * gridSize > MAX_ARRAY_SIZE) gridSize = MAX_ARRAY_SIZE / blockSize;
int totSize = blockSize * gridSize;
fprintf(stderr, "block size = (%d,%d,1) grid size = (%d,1,1)\n", k, blockSize, gridSize);
//for (int j = 0; j < wo; j++) printf("%d,", getComb(j, n-1, k));
for (int j = 0; j < totSize; j++) {
int step = wo / totSize * j;
if (j < wo % totSize) step += j;
else step += wo % totSize;
//printf("step=%d\n", step);
part[j] = getComb(step, n-1, k);
}
cudaMemcpy(gpu_part, part, sizeof(int) * totSize, cudaMemcpyHostToDevice);
ha4(gridSize, blockSize, k, n, wo, gpu_part, gpu_adj, gpu_ret, mod);
cudaDeviceSynchronize();
cudaMemcpy(ret, gpu_ret, sizeof(long long) * totSize, cudaMemcpyDeviceToHost);
unsigned long long sum = 0;
for (int j = 0; j < totSize; j++) {
sum = mod_sum64(sum, ret[j], 0);
}
//printf("sum = %u\n", sum);
if ((n-k)%2 == 1) ans = mod_sum64(ans, sum, mod);
else if (sum != 0) ans = mod_sum64(ans, mod-sum, mod);
}
printf("ans = %llu\n", ans);
cudaFree(gpu_ret);
cudaFree(gpu_adj);
cudaFree(gpu_part);
return 0;
}
|
a1e06fd92b472897e0754b9b3d0d355b30aff414.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define eps 1e-15
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template <typename scalar_t>
__global__ void
dr_cuda_backword_color_batch(const scalar_t *__restrict__ grad_im_bxhxwxd,
const scalar_t *__restrict__ im_bxhxwxd,
const scalar_t *__restrict__ imidx_bxhxwx1,
const scalar_t *__restrict__ imwei_bxhxwx3,
const scalar_t *__restrict__ points2d_bxfx6,
const scalar_t *__restrict__ features_bxfx3d,
scalar_t *__restrict__ grad_points2d_bxfx6,
scalar_t *__restrict__ grad_features_bxfx3d,
scalar_t *__restrict__ debug_im_bxhxwx3, int bnum,
int height, int width, int fnum, int dnum,
int multiplier) {
/*
// thread index
*/
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= bnum || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidx3 = totalidx1 * 3;
const int totalidxd = totalidx1 * dnum;
// coordinates
scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
scalar_t fidx = imidx_bxhxwx1[totalidx1];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
// visible faces
if (fidxint >= 0) {
const int shift1 = bidx * fnum + fidxint;
const int shift6 = shift1 * 6;
const int shift3d = shift1 * 3 * dnum;
// the imaging model is:
// I(x, y) = w0 * c0 + w1 * c1 + w2 * c2
// gradient of colors
// 3 points in one face
for (int i = 0; i < 3; i++) {
// directly use opengl weights
scalar_t w = imwei_bxhxwx3[totalidx3 + i];
int pointshift = shift3d + i * dnum;
// rgb value
for (int rgb = 0; rgb < dnum; rgb++) {
int colorshift = pointshift + rgb;
// this should be atomic operation
scalar_t *addr = grad_features_bxfx3d + colorshift;
scalar_t val = grad_im_bxhxwxd[totalidxd + rgb] * w;
atomicAdd(addr, val);
}
}
// gradient of points
// here, we calculate dl/dp
// dl/dp = dldI * dI/dp
// dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp
// first
    // 6 vertex coordinates (ax, ay, bx, by, cx, cy)
scalar_t ax = points2d_bxfx6[shift6 + 0];
scalar_t ay = points2d_bxfx6[shift6 + 1];
scalar_t bx = points2d_bxfx6[shift6 + 2];
scalar_t by = points2d_bxfx6[shift6 + 3];
scalar_t cx = points2d_bxfx6[shift6 + 4];
scalar_t cy = points2d_bxfx6[shift6 + 5];
////////////////////////////////////////////////////////////////////////////////////
// replace with other variables
scalar_t m = bx - ax;
scalar_t p = by - ay;
scalar_t n = cx - ax;
scalar_t q = cy - ay;
scalar_t s = x0 - ax;
scalar_t t = y0 - ay;
//////////////////////////////////////////////////////////////////////////////////////
scalar_t k1 = s * q - n * t;
scalar_t k2 = m * t - s * p;
scalar_t k3 = m * q - n * p;
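    // k1, k2 are the Cramer's-rule numerators and k3 the determinant of the
    // 2x2 system w1*(b-a) + w2*(c-a) = p0-a, i.e. the barycentric weights are
    // w1 = k1/k3, w2 = k2/k3 (and w0 = 1 - w1 - w2).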
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
scalar_t dk1dm = 0;
scalar_t dk1dn = -t;
scalar_t dk1dp = 0;
scalar_t dk1dq = s;
scalar_t dk1ds = q;
scalar_t dk1dt = -n;
scalar_t dk2dm = t;
scalar_t dk2dn = 0;
scalar_t dk2dp = -s;
scalar_t dk2dq = 0;
scalar_t dk2ds = -p;
scalar_t dk2dt = m;
scalar_t dk3dm = q;
scalar_t dk3dn = -p;
scalar_t dk3dp = -n;
scalar_t dk3dq = m;
scalar_t dk3ds = 0;
scalar_t dk3dt = 0;
///////////////////////////////////////////////////////////////////////////////
// w1 = k1 / k3
// w2 = k2 / k3
// remember we need divide k3 ^ 2
scalar_t dw1dm = dk1dm * k3 - dk3dm * k1;
scalar_t dw1dn = dk1dn * k3 - dk3dn * k1;
scalar_t dw1dp = dk1dp * k3 - dk3dp * k1;
scalar_t dw1dq = dk1dq * k3 - dk3dq * k1;
scalar_t dw1ds = dk1ds * k3 - dk3ds * k1;
scalar_t dw1dt = dk1dt * k3 - dk3dt * k1;
scalar_t dw2dm = dk2dm * k3 - dk3dm * k2;
scalar_t dw2dn = dk2dn * k3 - dk3dn * k2;
scalar_t dw2dp = dk2dp * k3 - dk3dp * k2;
scalar_t dw2dq = dk2dq * k3 - dk3dq * k2;
scalar_t dw2ds = dk2ds * k3 - dk3ds * k2;
scalar_t dw2dt = dk2dt * k3 - dk3dt * k2;
//////////////////////////////////////////////////////////////////////////////////////
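    // Chain rule back to the vertex coordinates: m, n, s all depend on ax with
    // derivative -1 (and p, q, t on ay), hence the negated sums below, while
    // bx only enters through m and cx only through n.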
scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds);
scalar_t dw1day = -(dw1dp + dw1dq + dw1dt);
scalar_t dw1dbx = dw1dm;
scalar_t dw1dby = dw1dp;
scalar_t dw1dcx = dw1dn;
scalar_t dw1dcy = dw1dq;
scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds);
scalar_t dw2day = -(dw2dp + dw2dq + dw2dt);
scalar_t dw2dbx = dw2dm;
scalar_t dw2dby = dw2dp;
scalar_t dw2dcx = dw2dn;
scalar_t dw2dcy = dw2dq;
for (int rgb = 0; rgb < dnum; rgb++) {
// the same color for 3 points
// thus we can simplify it
scalar_t c0 = features_bxfx3d[shift3d + rgb];
scalar_t c1 = features_bxfx3d[shift3d + dnum + rgb];
scalar_t c2 = features_bxfx3d[shift3d + dnum + dnum + rgb];
scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax;
scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day;
scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx;
scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby;
scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx;
scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy;
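      // dldI folds in the 1/k3^2 factor deferred from the quotient rule above;
      // eps guards against degenerate (near-zero-area) triangles.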
scalar_t dldI =
multiplier * grad_im_bxhxwxd[totalidxd + rgb] / (k3 * k3 + eps);
atomicAdd(grad_points2d_bxfx6 + shift6 + 0, dldI * dIdax);
atomicAdd(grad_points2d_bxfx6 + shift6 + 1, dldI * dIday);
atomicAdd(grad_points2d_bxfx6 + shift6 + 2, dldI * dIdbx);
atomicAdd(grad_points2d_bxfx6 + shift6 + 3, dldI * dIdby);
atomicAdd(grad_points2d_bxfx6 + shift6 + 4, dldI * dIdcx);
atomicAdd(grad_points2d_bxfx6 + shift6 + 5, dldI * dIdcy);
}
}
}
template <typename scalar_t>
__global__ void
dr_cuda_backword_prob_batch(const scalar_t *__restrict__ grad_improb_bxhxwx1,
const scalar_t *__restrict__ improb_bxhxwx1,
const scalar_t *__restrict__ imidx_bxhxwx1,
const scalar_t *__restrict__ probface_bxhxwxk,
const scalar_t *__restrict__ probcase_bxhxwxk,
const scalar_t *__restrict__ probdis_bxhxwxk,
const scalar_t *__restrict__ probdep_bxhxwxk,
const scalar_t *__restrict__ probacc_bxhxwxk,
const scalar_t *__restrict__ points2d_bxfx6,
scalar_t *__restrict__ grad_points2dprob_bxfx6,
int bnum, int height, int width, int fnum, int knum,
int multiplier, int sigmainv) {
/*
// thread index
*/
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= bnum || heiidx >= height || wididx >= width)
return;
//////////////////////////////////////////////
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
//////////////////////////////////////////////
// coordinates
scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1);
/////////////////////////////////////
// which face it belongs to?
scalar_t fidx = imidx_bxhxwx1[totalidx1];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
/////////////////////////////////////
// not covered by any faces
if (fidxint < 0) {
int fidxcover = fidxint;
scalar_t dLdp = grad_improb_bxhxwx1[totalidx1];
scalar_t allprob = improb_bxhxwx1[totalidx1];
for (int kid = 0; kid < knum; kid++) {
scalar_t fidx = probface_bxhxwxk[totalidxk + kid];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
if (fidxint < 0)
break;
const int shift1 = bidx * fnum + fidxint;
const int shift6 = shift1 * 6;
///////////////////////////////////////////
scalar_t prob = probdis_bxhxwxk[totalidxk + kid];
scalar_t dLdz =
-1.0 * sigmainv * dLdp * (1.0 - allprob) / (1.0 - prob + eps) * prob;
///////////////////////////////////////////////////////////////////
scalar_t edgecase = probcase_bxhxwxk[totalidxk + kid];
int edgeid = static_cast<int>(edgecase + 0.5) - 1;
/////////////////////////////////////////////////////////////
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = points2d_bxfx6[pshift + 0];
scalar_t y1 = points2d_bxfx6[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = points2d_bxfx6[pshift + 0];
scalar_t y1 = points2d_bxfx6[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = points2d_bxfx6[pshift2 + 0];
scalar_t y2 = points2d_bxfx6[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + eps);
//////////////////////////////////
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + eps);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + eps);
scalar_t dzdC = 2 * up / (down + eps);
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 0, dLdx2 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 1, dLdy2 / multiplier);
}
}
}
return;
}
void dr_cuda_backward_batch(
at::Tensor grad_image_bxhxwxd, at::Tensor grad_improb_bxhxwx1,
at::Tensor image_bxhxwxd, at::Tensor improb_bxhxwx1,
at::Tensor imidx_bxhxwx1, at::Tensor imwei_bxhxwx3,
at::Tensor probface_bxhxwxk, at::Tensor probcase_bxhxwxk,
at::Tensor probdis_bxhxwxk, at::Tensor probdep_bxhxwxk,
at::Tensor probacc_bxhxwxk, at::Tensor points2d_bxfx6,
at::Tensor colors_bxfx3d, at::Tensor grad_points2d_bxfx6,
at::Tensor grad_colors_bxfx3d, at::Tensor grad_points2dprob_bxfx6,
at::Tensor debug_im_bxhxwx3, int multiplier, int sigmainv) {
int bnum = grad_image_bxhxwxd.size(0);
int height = grad_image_bxhxwxd.size(1);
int width = grad_image_bxhxwxd.size(2);
int dnum = grad_image_bxhxwxd.size(3);
int fnum = grad_points2d_bxfx6.size(1);
int knum = probface_bxhxwxk.size(3);
// for bxhxw image size
const int threadnum = 1024;
const int totalthread = bnum * height * width;
const int blocknum = totalthread / threadnum + 1;
const dim3 threads(threadnum, 1, 1);
const dim3 blocks(blocknum, 1, 1);
// we exchange block and thread!
AT_DISPATCH_FLOATING_TYPES(
grad_image_bxhxwxd.type(), "dr_cuda_backward_color_batch", ([&] {
hipLaunchKernelGGL(( dr_cuda_backword_color_batch<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
grad_image_bxhxwxd.data<scalar_t>(), image_bxhxwxd.data<scalar_t>(),
imidx_bxhxwx1.data<scalar_t>(), imwei_bxhxwx3.data<scalar_t>(),
points2d_bxfx6.data<scalar_t>(), colors_bxfx3d.data<scalar_t>(),
grad_points2d_bxfx6.data<scalar_t>(),
grad_colors_bxfx3d.data<scalar_t>(),
debug_im_bxhxwx3.data<scalar_t>(), bnum, height, width, fnum, dnum,
multiplier);
}));
AT_DISPATCH_FLOATING_TYPES(
grad_image_bxhxwxd.type(), "dr_cuda_backward_prob_batch", ([&] {
hipLaunchKernelGGL(( dr_cuda_backword_prob_batch<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
grad_improb_bxhxwx1.data<scalar_t>(),
improb_bxhxwx1.data<scalar_t>(), imidx_bxhxwx1.data<scalar_t>(),
probface_bxhxwxk.data<scalar_t>(),
probcase_bxhxwxk.data<scalar_t>(), probdis_bxhxwxk.data<scalar_t>(),
probdep_bxhxwxk.data<scalar_t>(), probacc_bxhxwxk.data<scalar_t>(),
points2d_bxfx6.data<scalar_t>(),
grad_points2dprob_bxfx6.data<scalar_t>(), bnum, height, width, fnum,
knum, multiplier, sigmainv);
}));
return;
}
| a1e06fd92b472897e0754b9b3d0d355b30aff414.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define eps 1e-15
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
static __inline__ __device__ double atomicAdd(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN !=
    // NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template <typename scalar_t>
__global__ void
dr_cuda_backword_color_batch(const scalar_t *__restrict__ grad_im_bxhxwxd,
const scalar_t *__restrict__ im_bxhxwxd,
const scalar_t *__restrict__ imidx_bxhxwx1,
const scalar_t *__restrict__ imwei_bxhxwx3,
const scalar_t *__restrict__ points2d_bxfx6,
const scalar_t *__restrict__ features_bxfx3d,
scalar_t *__restrict__ grad_points2d_bxfx6,
scalar_t *__restrict__ grad_features_bxfx3d,
scalar_t *__restrict__ debug_im_bxhxwx3, int bnum,
int height, int width, int fnum, int dnum,
int multiplier) {
/*
// thread index
*/
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= bnum || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidx3 = totalidx1 * 3;
const int totalidxd = totalidx1 * dnum;
// coordinates
scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
scalar_t fidx = imidx_bxhxwx1[totalidx1];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
// visible faces
if (fidxint >= 0) {
const int shift1 = bidx * fnum + fidxint;
const int shift6 = shift1 * 6;
const int shift3d = shift1 * 3 * dnum;
// the imaging model is:
// I(x, y) = w0 * c0 + w1 * c1 + w2 * c2
// gradient of colors
// 3 points in one face
for (int i = 0; i < 3; i++) {
// directly use opengl weights
scalar_t w = imwei_bxhxwx3[totalidx3 + i];
int pointshift = shift3d + i * dnum;
// rgb value
for (int rgb = 0; rgb < dnum; rgb++) {
int colorshift = pointshift + rgb;
// this should be atomic operation
scalar_t *addr = grad_features_bxfx3d + colorshift;
scalar_t val = grad_im_bxhxwxd[totalidxd + rgb] * w;
atomicAdd(addr, val);
}
}
// gradient of points
// here, we calculate dl/dp
// dl/dp = dldI * dI/dp
// dI/dp = c0 * dw0 / dp + c1 * dw1 / dp + c2 * dw2 / dp
// first
    // 6 vertex coordinates (ax, ay, bx, by, cx, cy)
scalar_t ax = points2d_bxfx6[shift6 + 0];
scalar_t ay = points2d_bxfx6[shift6 + 1];
scalar_t bx = points2d_bxfx6[shift6 + 2];
scalar_t by = points2d_bxfx6[shift6 + 3];
scalar_t cx = points2d_bxfx6[shift6 + 4];
scalar_t cy = points2d_bxfx6[shift6 + 5];
////////////////////////////////////////////////////////////////////////////////////
// replace with other variables
scalar_t m = bx - ax;
scalar_t p = by - ay;
scalar_t n = cx - ax;
scalar_t q = cy - ay;
scalar_t s = x0 - ax;
scalar_t t = y0 - ay;
//////////////////////////////////////////////////////////////////////////////////////
scalar_t k1 = s * q - n * t;
scalar_t k2 = m * t - s * p;
scalar_t k3 = m * q - n * p;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
scalar_t dk1dm = 0;
scalar_t dk1dn = -t;
scalar_t dk1dp = 0;
scalar_t dk1dq = s;
scalar_t dk1ds = q;
scalar_t dk1dt = -n;
scalar_t dk2dm = t;
scalar_t dk2dn = 0;
scalar_t dk2dp = -s;
scalar_t dk2dq = 0;
scalar_t dk2ds = -p;
scalar_t dk2dt = m;
scalar_t dk3dm = q;
scalar_t dk3dn = -p;
scalar_t dk3dp = -n;
scalar_t dk3dq = m;
scalar_t dk3ds = 0;
scalar_t dk3dt = 0;
///////////////////////////////////////////////////////////////////////////////
// w1 = k1 / k3
// w2 = k2 / k3
// remember we need divide k3 ^ 2
scalar_t dw1dm = dk1dm * k3 - dk3dm * k1;
scalar_t dw1dn = dk1dn * k3 - dk3dn * k1;
scalar_t dw1dp = dk1dp * k3 - dk3dp * k1;
scalar_t dw1dq = dk1dq * k3 - dk3dq * k1;
scalar_t dw1ds = dk1ds * k3 - dk3ds * k1;
scalar_t dw1dt = dk1dt * k3 - dk3dt * k1;
scalar_t dw2dm = dk2dm * k3 - dk3dm * k2;
scalar_t dw2dn = dk2dn * k3 - dk3dn * k2;
scalar_t dw2dp = dk2dp * k3 - dk3dp * k2;
scalar_t dw2dq = dk2dq * k3 - dk3dq * k2;
scalar_t dw2ds = dk2ds * k3 - dk3ds * k2;
scalar_t dw2dt = dk2dt * k3 - dk3dt * k2;
//////////////////////////////////////////////////////////////////////////////////////
scalar_t dw1dax = -(dw1dm + dw1dn + dw1ds);
scalar_t dw1day = -(dw1dp + dw1dq + dw1dt);
scalar_t dw1dbx = dw1dm;
scalar_t dw1dby = dw1dp;
scalar_t dw1dcx = dw1dn;
scalar_t dw1dcy = dw1dq;
scalar_t dw2dax = -(dw2dm + dw2dn + dw2ds);
scalar_t dw2day = -(dw2dp + dw2dq + dw2dt);
scalar_t dw2dbx = dw2dm;
scalar_t dw2dby = dw2dp;
scalar_t dw2dcx = dw2dn;
scalar_t dw2dcy = dw2dq;
for (int rgb = 0; rgb < dnum; rgb++) {
// the same color for 3 points
// thus we can simplify it
scalar_t c0 = features_bxfx3d[shift3d + rgb];
scalar_t c1 = features_bxfx3d[shift3d + dnum + rgb];
scalar_t c2 = features_bxfx3d[shift3d + dnum + dnum + rgb];
scalar_t dIdax = (c1 - c0) * dw1dax + (c2 - c0) * dw2dax;
scalar_t dIday = (c1 - c0) * dw1day + (c2 - c0) * dw2day;
scalar_t dIdbx = (c1 - c0) * dw1dbx + (c2 - c0) * dw2dbx;
scalar_t dIdby = (c1 - c0) * dw1dby + (c2 - c0) * dw2dby;
scalar_t dIdcx = (c1 - c0) * dw1dcx + (c2 - c0) * dw2dcx;
scalar_t dIdcy = (c1 - c0) * dw1dcy + (c2 - c0) * dw2dcy;
scalar_t dldI =
multiplier * grad_im_bxhxwxd[totalidxd + rgb] / (k3 * k3 + eps);
atomicAdd(grad_points2d_bxfx6 + shift6 + 0, dldI * dIdax);
atomicAdd(grad_points2d_bxfx6 + shift6 + 1, dldI * dIday);
atomicAdd(grad_points2d_bxfx6 + shift6 + 2, dldI * dIdbx);
atomicAdd(grad_points2d_bxfx6 + shift6 + 3, dldI * dIdby);
atomicAdd(grad_points2d_bxfx6 + shift6 + 4, dldI * dIdcx);
atomicAdd(grad_points2d_bxfx6 + shift6 + 5, dldI * dIdcy);
}
}
}
template <typename scalar_t>
__global__ void
dr_cuda_backword_prob_batch(const scalar_t *__restrict__ grad_improb_bxhxwx1,
const scalar_t *__restrict__ improb_bxhxwx1,
const scalar_t *__restrict__ imidx_bxhxwx1,
const scalar_t *__restrict__ probface_bxhxwxk,
const scalar_t *__restrict__ probcase_bxhxwxk,
const scalar_t *__restrict__ probdis_bxhxwxk,
const scalar_t *__restrict__ probdep_bxhxwxk,
const scalar_t *__restrict__ probacc_bxhxwxk,
const scalar_t *__restrict__ points2d_bxfx6,
scalar_t *__restrict__ grad_points2dprob_bxfx6,
int bnum, int height, int width, int fnum, int knum,
int multiplier, int sigmainv) {
/*
// thread index
*/
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= bnum || heiidx >= height || wididx >= width)
return;
//////////////////////////////////////////////
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
//////////////////////////////////////////////
// coordinates
scalar_t x0 = 1.0 * multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = 1.0 * multiplier / height * (height - 2 * heiidx - 1);
/////////////////////////////////////
// which face it belongs to?
scalar_t fidx = imidx_bxhxwx1[totalidx1];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
/////////////////////////////////////
// not covered by any faces
if (fidxint < 0) {
int fidxcover = fidxint;
scalar_t dLdp = grad_improb_bxhxwx1[totalidx1];
scalar_t allprob = improb_bxhxwx1[totalidx1];
for (int kid = 0; kid < knum; kid++) {
scalar_t fidx = probface_bxhxwxk[totalidxk + kid];
// face begins from 1
// convert it into int, use round!
int fidxint = static_cast<int>(fidx + 0.5) - 1;
if (fidxint < 0)
break;
const int shift1 = bidx * fnum + fidxint;
const int shift6 = shift1 * 6;
///////////////////////////////////////////
scalar_t prob = probdis_bxhxwxk[totalidxk + kid];
scalar_t dLdz =
-1.0 * sigmainv * dLdp * (1.0 - allprob) / (1.0 - prob + eps) * prob;
///////////////////////////////////////////////////////////////////
scalar_t edgecase = probcase_bxhxwxk[totalidxk + kid];
int edgeid = static_cast<int>(edgecase + 0.5) - 1;
/////////////////////////////////////////////////////////////
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = points2d_bxfx6[pshift + 0];
scalar_t y1 = points2d_bxfx6[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = points2d_bxfx6[pshift + 0];
scalar_t y1 = points2d_bxfx6[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = points2d_bxfx6[pshift2 + 0];
scalar_t y2 = points2d_bxfx6[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + eps);
//////////////////////////////////
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + eps);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + eps);
scalar_t dzdC = 2 * up / (down + eps);
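        // Chain rule through the line coefficients: dB/dx1 = 1, dC/dx1 = -y2,
        // dA/dy1 = -1, dC/dy1 = x2 (and symmetrically for the second
        // endpoint), which yields the endpoint gradients below.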
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 0, dLdx1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift + 1, dLdy1 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 0, dLdx2 / multiplier);
atomicAdd(grad_points2dprob_bxfx6 + pshift2 + 1, dLdy2 / multiplier);
}
}
}
return;
}
void dr_cuda_backward_batch(
at::Tensor grad_image_bxhxwxd, at::Tensor grad_improb_bxhxwx1,
at::Tensor image_bxhxwxd, at::Tensor improb_bxhxwx1,
at::Tensor imidx_bxhxwx1, at::Tensor imwei_bxhxwx3,
at::Tensor probface_bxhxwxk, at::Tensor probcase_bxhxwxk,
at::Tensor probdis_bxhxwxk, at::Tensor probdep_bxhxwxk,
at::Tensor probacc_bxhxwxk, at::Tensor points2d_bxfx6,
at::Tensor colors_bxfx3d, at::Tensor grad_points2d_bxfx6,
at::Tensor grad_colors_bxfx3d, at::Tensor grad_points2dprob_bxfx6,
at::Tensor debug_im_bxhxwx3, int multiplier, int sigmainv) {
int bnum = grad_image_bxhxwxd.size(0);
int height = grad_image_bxhxwxd.size(1);
int width = grad_image_bxhxwxd.size(2);
int dnum = grad_image_bxhxwxd.size(3);
int fnum = grad_points2d_bxfx6.size(1);
int knum = probface_bxhxwxk.size(3);
// for bxhxw image size
const int threadnum = 1024;
const int totalthread = bnum * height * width;
const int blocknum = totalthread / threadnum + 1;
const dim3 threads(threadnum, 1, 1);
const dim3 blocks(blocknum, 1, 1);
// we exchange block and thread!
AT_DISPATCH_FLOATING_TYPES(
grad_image_bxhxwxd.type(), "dr_cuda_backward_color_batch", ([&] {
dr_cuda_backword_color_batch<scalar_t><<<blocks, threads>>>(
grad_image_bxhxwxd.data<scalar_t>(), image_bxhxwxd.data<scalar_t>(),
imidx_bxhxwx1.data<scalar_t>(), imwei_bxhxwx3.data<scalar_t>(),
points2d_bxfx6.data<scalar_t>(), colors_bxfx3d.data<scalar_t>(),
grad_points2d_bxfx6.data<scalar_t>(),
grad_colors_bxfx3d.data<scalar_t>(),
debug_im_bxhxwx3.data<scalar_t>(), bnum, height, width, fnum, dnum,
multiplier);
}));
AT_DISPATCH_FLOATING_TYPES(
grad_image_bxhxwxd.type(), "dr_cuda_backward_prob_batch", ([&] {
dr_cuda_backword_prob_batch<scalar_t><<<blocks, threads>>>(
grad_improb_bxhxwx1.data<scalar_t>(),
improb_bxhxwx1.data<scalar_t>(), imidx_bxhxwx1.data<scalar_t>(),
probface_bxhxwxk.data<scalar_t>(),
probcase_bxhxwxk.data<scalar_t>(), probdis_bxhxwxk.data<scalar_t>(),
probdep_bxhxwxk.data<scalar_t>(), probacc_bxhxwxk.data<scalar_t>(),
points2d_bxfx6.data<scalar_t>(),
grad_points2dprob_bxfx6.data<scalar_t>(), bnum, height, width, fnum,
knum, multiplier, sigmainv);
}));
return;
}
|
5ef519b13f78e98e97cab3e9a996b947d756b293.hip | // !!! This is a file automatically generated by hipify!!!
#define GRB_USE_APSPIE
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <random>
#include <hip/hip_runtime_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/bfs.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main( int argc, char** argv )
{
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int source;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs( argc, argv, vm );
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
source = vm["source" ].as<int>();
// This is an imperfect solution, because this should happen in
// desc.loadArgs(vm) instead of application code!
// TODO: fix this
readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols,
nvals, directed, mtxinfo );
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK( desc.loadArgs(vm) );
CHECK( desc.toggle(graphblas::GrB_INP1) );
// Matrix A
graphblas::Matrix<float> a(nrows, ncols);
CHECK( a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
argv[argc-1]) );
CHECK( a.nrows(&nrows) );
CHECK( a.ncols(&ncols) );
CHECK( a.nvals(&nvals) );
if( debug ) CHECK( a.print() );
// Vector v
graphblas::Vector<float> v(nrows);
// Cpu BFS
CpuTimer bfs_cpu;
graphblas::Index* h_bfs_cpu = (graphblas::Index*)malloc(nrows*
sizeof(graphblas::Index));
int depth = 10000;
bfs_cpu.Start();
graphblas::algorithm::bfsCpu( source, &a, h_bfs_cpu, depth, transpose );
bfs_cpu.Stop();
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::algorithm::bfs(&v, &a, source, &desc);
warmup.Stop();
std::vector<float> h_bfs_gpu;
CHECK( v.extractTuples(&h_bfs_gpu, &nrows) );
BOOST_ASSERT_LIST( h_bfs_cpu, h_bfs_gpu, nrows );
// Source randomization
std::mt19937 gen(0);
std::uniform_int_distribution<> dis(0,nrows-1);
// Benchmark
CpuTimer vxm_gpu;
//hipProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
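  // 'tight' accumulates the per-call timing value returned by bfs(); judging
  // by the name, this is a tighter (kernel-side) figure than the wall-clock
  // vxm_gpu timer surrounding the loop.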
for( int i=0; i<niter; i++ )
{
source = dis(gen);
tight += graphblas::algorithm::bfs(&v, &a, source, &desc);
}
//hipProfilerStop();
vxm_gpu.Stop();
return 0;
}
| 5ef519b13f78e98e97cab3e9a996b947d756b293.cu | #define GRB_USE_APSPIE
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <random>
#include <cuda_profiler_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/bfs.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main( int argc, char** argv )
{
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int source;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs( argc, argv, vm );
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
source = vm["source" ].as<int>();
// This is an imperfect solution, because this should happen in
// desc.loadArgs(vm) instead of application code!
// TODO: fix this
readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols,
nvals, directed, mtxinfo );
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK( desc.loadArgs(vm) );
CHECK( desc.toggle(graphblas::GrB_INP1) );
// Matrix A
graphblas::Matrix<float> a(nrows, ncols);
CHECK( a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
argv[argc-1]) );
CHECK( a.nrows(&nrows) );
CHECK( a.ncols(&ncols) );
CHECK( a.nvals(&nvals) );
if( debug ) CHECK( a.print() );
// Vector v
graphblas::Vector<float> v(nrows);
// Cpu BFS
CpuTimer bfs_cpu;
graphblas::Index* h_bfs_cpu = (graphblas::Index*)malloc(nrows*
sizeof(graphblas::Index));
int depth = 10000;
bfs_cpu.Start();
graphblas::algorithm::bfsCpu( source, &a, h_bfs_cpu, depth, transpose );
bfs_cpu.Stop();
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::algorithm::bfs(&v, &a, source, &desc);
warmup.Stop();
std::vector<float> h_bfs_gpu;
CHECK( v.extractTuples(&h_bfs_gpu, &nrows) );
BOOST_ASSERT_LIST( h_bfs_cpu, h_bfs_gpu, nrows );
// Source randomization
std::mt19937 gen(0);
std::uniform_int_distribution<> dis(0,nrows-1);
// Benchmark
CpuTimer vxm_gpu;
//cudaProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
for( int i=0; i<niter; i++ )
{
source = dis(gen);
tight += graphblas::algorithm::bfs(&v, &a, source, &desc);
}
//cudaProfilerStop();
vxm_gpu.Stop();
return 0;
}
|
b893442c43e96e225cb528a50722b557475be8c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "AddAndRefreshConnectionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int node1 = 1;
int node2 = 1;
int *activityFlag = NULL;
hipMalloc(&activityFlag, XSIZE*YSIZE);
int *connection = NULL;
hipMalloc(&connection, XSIZE*YSIZE);
int *age = NULL;
hipMalloc(&age, XSIZE*YSIZE);
int maxCells = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(AddAndRefreshConnectionKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, node1,node2,activityFlag,connection,age,maxCells);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(AddAndRefreshConnectionKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, node1,node2,activityFlag,connection,age,maxCells);
}
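// The 10 untimed launches above (plus the synchronized first launch) serve as
// warm-up so one-time initialization cost stays out of the timed loop below.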
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(AddAndRefreshConnectionKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, node1,node2,activityFlag,connection,age,maxCells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b893442c43e96e225cb528a50722b557475be8c6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "AddAndRefreshConnectionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int node1 = 1;
int node2 = 1;
int *activityFlag = NULL;
cudaMalloc(&activityFlag, XSIZE*YSIZE);
int *connection = NULL;
cudaMalloc(&connection, XSIZE*YSIZE);
int *age = NULL;
cudaMalloc(&age, XSIZE*YSIZE);
int maxCells = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
AddAndRefreshConnectionKernel<<<gridBlock,threadBlock>>>(node1,node2,activityFlag,connection,age,maxCells);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
AddAndRefreshConnectionKernel<<<gridBlock,threadBlock>>>(node1,node2,activityFlag,connection,age,maxCells);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
AddAndRefreshConnectionKernel<<<gridBlock,threadBlock>>>(node1,node2,activityFlag,connection,age,maxCells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2790391b34c4e7d3df68193be27e586b881d902a.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel_max.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define RANGE 2048
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; //graph inputfile
file_format = atoi(argv[2]); //graph format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
srand(7);
// Allocate the CSR structure
csr_array *csr;
// Parse graph file and store into a CSR format
if (file_format == 1)
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 0)
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 2)
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
else {
printf("reserve for future");
exit(1);
}
// Allocate the vertex value array
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
// Set up kernel dimensions
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
const int num_gpu_threads = grid.x * threads.x;
int *cont = (int *)malloc(num_gpu_threads * sizeof(int));
for (int i = 0; i < num_gpu_threads; i++) {
cont[i] = false;
}
int *node_value = (int *)malloc(num_nodes * sizeof(int));
if (!node_value) fprintf(stderr, "node_value malloc failed\n");
// Allocate the color array
int *color = (int *)malloc(num_nodes * sizeof(int));
if (!color) fprintf(stderr, "color malloc failed\n");
// Initialize all the colors to -1
// Randomize the value for each vertex
for (int i = 0; i < num_nodes; i++) {
color[i] = -1;
node_value[i] = rand() % RANGE;
}
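    // The random per-vertex values drive a Jones-Plassmann-style parallel
    // greedy coloring: in each round, uncolored vertices that are local maxima
    // among their uncolored neighbours take the current color (the exact rule
    // is implemented in kernel_max.cu, not shown here).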
int *col_cnt = (int *)malloc(num_edges * sizeof(int));
for (int i = 0; i < num_nodes; ++i) {
const int start_edge = csr->row_array[i];
const int end_edge = csr->row_array[i + 1];
for (int edge = start_edge; edge < end_edge; ++edge) {
const int neighbor = csr->col_array[edge];
const int neigh_out_deg = csr->row_array[neighbor + 1] - csr->row_array[neighbor];
col_cnt[edge] = neigh_out_deg;
}
}
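    // col_cnt[e] caches the out-degree of the neighbor that edge e points to,
    // presumably used by the coloring kernel as a tie-breaker alongside
    // node_value.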
int *row_d;
int *col_d;
int *max_d;
int *col_cnt_d;
int *color_d;
int *node_value_d;
int *stop_d;
int *cont_d;
// Create device-side buffers for the graph
err = hipMalloc(&cont_d, num_gpu_threads * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc cont_d (size:%d) => %s\n", num_gpu_threads , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d): %s\n", num_edges , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_cnt_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d): %s\n", num_edges , hipGetErrorString(err));
return -1;
}
// Termination variable
err = hipMalloc(&stop_d, sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc stop_d (size:%d) => %s\n", 1 , hipGetErrorString(err));
return -1;
}
// Create device-side buffers for color
err = hipMalloc(&color_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc color_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&node_value_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc node_value_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
err = hipMalloc(&max_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc max_d (size:%d) => %s\n", num_nodes , hipGetErrorString(err));
return -1;
}
// Copy data to device-side buffers
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
err = hipMemcpy(color_d, color, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy color_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(cont_d, cont, num_gpu_threads * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy cont_d (size:%d) => %s\n", num_gpu_threads, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(max_d, color, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy max_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_cnt_d, col_cnt, num_edges * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_cnt_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(node_value_d, node_value, num_nodes * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy node_value_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
return -1;
}
int stop = 1;
int graph_color = 1;
// Main computation loop
double timer3 = gettime();
bool conti = true;
//while (conti) {
for (int i = 0; i < num_nodes; i++) {
stop = 0;
// Copy the termination variable to the device
// Launch the color kernel 1
hipLaunchKernelGGL(( color1_pusho) , dim3(grid), dim3(threads) , 0, 0, row_d, col_d, node_value_d, col_cnt_d, color_d,
cont_d, max_d, graph_color, num_nodes,
num_edges);
// Launch the color kernel 2
hipLaunchKernelGGL(( color2_push) , dim3(grid), dim3(threads) , 0, 0, node_value_d, color_d, max_d, graph_color,
num_nodes, num_edges);
err = hipMemcpy(cont, cont_d, num_gpu_threads * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: read cont_d: %s\n", hipGetErrorString(err));
}
conti = false;
for (int j = 0; j < num_gpu_threads; ++j) {
if (cont[j]) {
conti = true;
break;
}
}
if (!conti) {
break;
}
// Increment the color for the next iter
graph_color++;
}
hipDeviceSynchronize();
double timer4 = gettime();
// Copy back the color array
err = hipMemcpy(color, color_d, num_nodes * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("ERROR: hipMemcpy(): %s\n", hipGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print out color and timing statistics
printf("total number of colors used: %d\n", graph_color);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Dump the color array into an output file
print_vector(color, num_nodes);
#endif
// Free host-side buffers
free(node_value);
free(color);
free(cont);
free(col_cnt);
csr->freeArrays();
free(csr);
// Free CUDA buffers
hipFree(row_d);
hipFree(col_d);
hipFree(max_d);
hipFree(color_d);
hipFree(node_value_d);
hipFree(stop_d);
hipFree(col_cnt_d);
hipFree(cont_d);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
| 2790391b34c4e7d3df68193be27e586b881d902a.cu | /************************************************************************************\
* *
* Copyright © 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel_max.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define RANGE 2048
void print_vector(int *vector, int num);
int main(int argc, char **argv)
{
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; //graph inputfile
file_format = atoi(argv[2]); //graph format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
srand(7);
// Allocate the CSR structure
csr_array *csr;
// Parse graph file and store into a CSR format
if (file_format == 1)
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 0)
csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
else if (file_format == 2)
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
else {
printf("reserve for future");
exit(1);
}
// Allocate the vertex value array
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
// Set up kernel dimensions
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
const int num_gpu_threads = grid.x * threads.x;
int *cont = (int *)malloc(num_gpu_threads * sizeof(int));
for (int i = 0; i < num_gpu_threads; i++) {
cont[i] = false;
}
int *node_value = (int *)malloc(num_nodes * sizeof(int));
if (!node_value) fprintf(stderr, "node_value malloc failed\n");
// Allocate the color array
int *color = (int *)malloc(num_nodes * sizeof(int));
if (!color) fprintf(stderr, "color malloc failed\n");
// Initialize all the colors to -1
// Randomize the value for each vertex
for (int i = 0; i < num_nodes; i++) {
color[i] = -1;
node_value[i] = rand() % RANGE;
}
int *col_cnt = (int *)malloc(num_edges * sizeof(int));
for (int i = 0; i < num_nodes; ++i) {
const int start_edge = csr->row_array[i];
const int end_edge = csr->row_array[i + 1];
for (int edge = start_edge; edge < end_edge; ++edge) {
const int neighbor = csr->col_array[edge];
const int neigh_out_deg = csr->row_array[neighbor + 1] - csr->row_array[neighbor];
col_cnt[edge] = neigh_out_deg;
}
}
int *row_d;
int *col_d;
int *max_d;
int *col_cnt_d;
int *color_d;
int *node_value_d;
int *stop_d;
int *cont_d;
// Create device-side buffers for the graph
err = cudaMalloc(&cont_d, num_gpu_threads * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc cont_d (size:%d) => %s\n", num_gpu_threads , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d): %s\n", num_edges , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_cnt_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d): %s\n", num_edges , cudaGetErrorString(err));
return -1;
}
// Termination variable
err = cudaMalloc(&stop_d, sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1 , cudaGetErrorString(err));
return -1;
}
// Create device-side buffers for color
err = cudaMalloc(&color_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc color_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&node_value_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc node_value_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&max_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc max_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
return -1;
}
// Copy data to device-side buffers
double timer1 = gettime();
#ifdef GEM5_FUSION
m5_work_begin(0, 0);
#endif
err = cudaMemcpy(color_d, color, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy color_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(cont_d, cont, num_gpu_threads * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy cont_d (size:%d) => %s\n", num_gpu_threads, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(max_d, color, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy max_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_cnt_d, col_cnt, num_edges * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_cnt_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(node_value_d, node_value, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy node_value_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
return -1;
}
int stop = 1;
int graph_color = 1;
// Main computation loop
double timer3 = gettime();
bool conti = true;
//while (conti) {
for (int i = 0; i < num_nodes; i++) {
stop = 0;
// Copy the termination variable to the device
// Launch the color kernel 1
color1_pusho <<< grid, threads >>>(row_d, col_d, node_value_d, col_cnt_d, color_d,
cont_d, max_d, graph_color, num_nodes,
num_edges);
// Launch the color kernel 2
color2_push <<< grid, threads >>>(node_value_d, color_d, max_d, graph_color,
num_nodes, num_edges);
err = cudaMemcpy(cont, cont_d, num_gpu_threads * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: read cont_d: %s\n", cudaGetErrorString(err));
}
conti = false;
for (int j = 0; j < num_gpu_threads; ++j) {
if (cont[j]) {
conti = true;
break;
}
}
if (!conti) {
break;
}
// Increment the color for the next iter
graph_color++;
}
cudaThreadSynchronize();
double timer4 = gettime();
// Copy back the color array
err = cudaMemcpy(color, color_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("ERROR: cudaMemcpy(): %s\n", cudaGetErrorString(err));
return -1;
}
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
double timer2 = gettime();
// Print out color and timing statistics
printf("total number of colors used: %d\n", graph_color);
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Dump the color array into an output file
print_vector(color, num_nodes);
#endif
// Free host-side buffers
free(node_value);
free(color);
free(cont);
free(col_cnt);
csr->freeArrays();
free(csr);
// Free CUDA buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(max_d);
cudaFree(color_d);
cudaFree(node_value_d);
cudaFree(stop_d);
cudaFree(col_cnt_d);
cudaFree(cont_d);
return 0;
}
void print_vector(int *vector, int num)
{
FILE * fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.out\n");
return;
}
for (int i = 0; i < num; i++)
fprintf(fp, "%d: %d\n", i + 1, vector[i]);
fclose(fp);
}
|
5397e7c27f7551782345b53e719f1c8007de5029.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include "../include/slic.h"
__device__ __constant__ float slic_factor;
void initializeSlicFactor()
{
const float * slic_factor_hp = &slic_factor_h;
hipError_t cudaStatus = hipMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float));
}
__global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
//if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
//{
//printf("k\n");
//}
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), d_pix_data[pix_index].l);
atomicAdd(&(d_spx_data[spx_index].accum[1]), d_pix_data[pix_index].a);
atomicAdd(&(d_spx_data[spx_index].accum[2]), d_pix_data[pix_index].b);
atomicAdd(&(d_spx_data[spx_index].accum[3]), 1);
atomicAdd(&(d_spx_data[spx_index].accum[4]), x);
atomicAdd(&(d_spx_data[spx_index].accum[5]), y);
}
}
__global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
// If we do 16 instead of 8, only have enough memory for a short, not an int,
// and 16*32*255 does not fit in a short
__shared__ int acc[6][3][3][4][32]; //LAB+count, 3x3 neighbors, 8x32 values
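// Arithmetic behind the note above (assuming 4-byte int): the accumulator as
// declared is 6*3*3*4*32 = 6,912 ints = 27,648 bytes of shared memory, and a
// worst-case per-channel sum over a 16x32 tile would be 16*32*255 = 130,560,
// which overflows a 16-bit short (SHRT_MAX = 32,767); hence int accumulators
// over the smaller tile.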
int tidx=threadIdx.x;
int tidy=threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
for (int nx=0;nx<3;++nx) for (int ny=0;ny<3;++ny) for(int c=0;c<6;++c) acc[c][ny][nx][tidy][tidx]=0;
int i_center = blockIdx.x * blockDim.x / spx_size;
int j_center = blockIdx.y * blockDim.y / spx_size;
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1);
int ny = (j<j_center) ? 0 : ((j>j_center) ? 2 : 1);
acc[0][ny][nx][tidy][tidx] = d_pix_data[pix_index].l;
acc[1][ny][nx][tidy][tidx] = d_pix_data[pix_index].a;
acc[2][ny][nx][tidy][tidx] = d_pix_data[pix_index].b;
acc[3][ny][nx][tidy][tidx] = 1;
acc[4][ny][nx][tidy][tidx] = x;
acc[5][ny][nx][tidy][tidx] = y;
__syncthreads();
// Collapse over X and Y
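// Standard shared-memory tree reduction: at each step, threads whose linear id
// is a multiple of 2*step add their partner's partial sums, halving the number
// of live accumulators until acc[c][ny][nx][0][0] holds the block-wide total.
// Every thread stays in the loop so the __syncthreads() below remains uniform.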
int tid = tidy * blockDim.x + tidx;
for (int step=1; step<32*4; step *= 2)
{
if (tid % (2*step) == 0)
{
for (int ny=0; ny<3; ny++)
for (int nx=0; nx<3; nx++)
for (int c=0; c<6; c++)
*((int*)acc[c][ny][nx] + tid) += *((int*)acc[c][ny][nx] + tid + step);
}
__syncthreads();
}
// Is this ok? See https://stackoverflow.com/questions/6666382/can-i-use-syncthreads-after-having-dropped-threads
// TODO: Use these threads for nx, ny, c loop
if (tidy != 0) return;
// Now, acc[c][ny][nx][0][0] has the values we need
// but where do we write them to?
// Just one warp so no syncThreads (TODO)
if (tidx != 0) return;
for (int ny=0; ny<3; ny++)
{
int j = j_center + ny - 1;
if (j<0 || j>=spx_height) continue;
for (int nx=0; nx<3; nx++)
{
int i = i_center + nx - 1;
if (i<0 || i>=spx_width) continue;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), (int)acc[0][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[1]), (int)acc[1][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[2]), (int)acc[2][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[3]), (int)acc[3][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[4]), (int)acc[4][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[5]), (int)acc[5][ny][nx][0][0]);
}
}
}
__global__ void k_averaging(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].l = d_spx_data[spx_index].accum[0] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].a = d_spx_data[spx_index].accum[1] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].b = d_spx_data[spx_index].accum[2] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].x = d_spx_data[spx_index].accum[4] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].y = d_spx_data[spx_index].accum[5] / d_spx_data[spx_index].accum[3];
}
}
__global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
__shared__ spx_data spx[9 * 32];
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
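// i_sign/j_sign flatten the 3x3 superpixel neighborhood: index k in 0..8 maps
// to the offset pair (i_sign[k], j_sign[k]), with the i offset varying slowest.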
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0)
{
int sh_idx = 0;
for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1
{
for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1
{
if (j < 0 || j >= spx_height || i < 0 || i >= spx_width)
{
sh_idx++;
continue;
}
int spx_index = j * spx_width + i;
// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
// printf("%i ::::: %i\n", spx_index, sh_idx);
spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x];
sh_idx++;
}
}
}
__syncthreads();
for(int i=0; i<9; i++)
{
int l_dist = l-(int)(spx[i + 8*blockIdx.x].l);
l_dist *= l_dist;
int a_dist = a-(int)(spx[i + 8*blockIdx.x].a);
a_dist *= a_dist;
int b_dist = b-(int)(spx[i + 8*blockIdx.x].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)spx[i + 8*blockIdx.x].x;
x_dist *= x_dist;
int y_dist = y-(int)spx[i + 8*blockIdx.x].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + i_sign[i]*window_size;
min_j = j_center + j_sign[i]*window_size;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
}
}
__global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int i = i_center - window_size; i <= i_center + window_size; i++)
{
if (i < 0 || i >= spx_width) continue;
for(int j = j_center - window_size; j <= j_center + window_size; j++)
{
if (j < 0 || j >= spx_height) continue;
int spx_index = j * spx_width + i;
int l_dist = l-(int)(d_spx_data[spx_index].l);
l_dist *= l_dist;
int a_dist = a-(int)(d_spx_data[spx_index].a);
a_dist *= a_dist;
int b_dist = b-(int)(d_spx_data[spx_index].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)d_spx_data[spx_index].x;
x_dist *= x_dist;
int y_dist = y-(int)d_spx_data[spx_index].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
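// SLIC distance: squared Lab color distance plus the squared spatial distance
// scaled by slic_factor (uploaded in initializeSlicFactor). In the usual SLIC
// formulation this factor plays the role of (m/S)^2 for compactness m and grid
// interval S, but that is an assumption about slic_factor_h in include/slic.h.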
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i;
min_j = j;
}
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_ownershipOpt2(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
__shared__ int spx[3][3][5]; // Y, X, LABXY
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
// Initialize SMEM
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int nx = tid % 3;
tid /= 3;
int ny = tid % 3;
tid /= 3;
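// Cooperative load: the first 3*3*5 = 45 threads of the block each fetch one
// field (l, a, b, x or y, selected by the switch below) of one of the nine
// candidate superpixels into shared memory; out-of-range neighbors get -1.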
if (tid < 5)
{
int value;
int i = i_center + nx - 1;
int j = j_center + ny - 1;
if (i<0 || i>=spx_width || j<0 || j>=spx_height)
{
value = -1;
}
else
{
int spx_index = j * spx_width + i;
const spx_data& spix = d_spx_data[spx_index];
switch(tid) //TODO:Get rid of it by using better data struct.?
{
case 0: value=spix.l; break;
case 1: value=spix.a; break;
case 2: value=spix.b; break;
case 3: value=spix.x; break;
case 4: value=spix.y; break;
}
}
spx[ny][nx][tid] = value;
}
__syncthreads();
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx)
{
int* spix = spx[ny][nx];
if (spix[0]==-1) continue;
int l_dist = l-spix[0];
l_dist *= l_dist;
int a_dist = a-spix[1];
a_dist *= a_dist;
int b_dist = b-spix[2];
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-spix[3];
x_dist *= x_dist;
int y_dist = y-spix[4];
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + nx - 1;
min_j = j_center + ny - 1;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_reset(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].accum[0] = 0;
d_spx_data[spx_index].accum[1] = 0;
d_spx_data[spx_index].accum[2] = 0;
d_spx_data[spx_index].accum[3] = 0;
d_spx_data[spx_index].accum[4] = 0;
d_spx_data[spx_index].accum[5] = 0;
}
}
| 5397e7c27f7551782345b53e719f1c8007de5029.cu | #include <cmath>
#include <cstdio>
#include "../include/slic.h"
__device__ __constant__ float slic_factor;
void initializeSlicFactor()
{
const float * slic_factor_hp = &slic_factor_h;
cudaError_t cudaStatus = cudaMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float));
}
__global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
//if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
//{
//printf("k\n");
//}
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), d_pix_data[pix_index].l);
atomicAdd(&(d_spx_data[spx_index].accum[1]), d_pix_data[pix_index].a);
atomicAdd(&(d_spx_data[spx_index].accum[2]), d_pix_data[pix_index].b);
atomicAdd(&(d_spx_data[spx_index].accum[3]), 1);
atomicAdd(&(d_spx_data[spx_index].accum[4]), x);
atomicAdd(&(d_spx_data[spx_index].accum[5]), y);
}
}
__global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
// If we do 16 instead of 8, only have enough memory for a short, not an int,
// and 16*32*255 does not fit in a short
__shared__ int acc[6][3][3][4][32]; //LAB+count, 3x3 neighbors, 8x32 values
int tidx=threadIdx.x;
int tidy=threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
for (int nx=0;nx<3;++nx) for (int ny=0;ny<3;++ny) for(int c=0;c<6;++c) acc[c][ny][nx][tidy][tidx]=0;
int i_center = blockIdx.x * blockDim.x / spx_size;
int j_center = blockIdx.y * blockDim.y / spx_size;
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1);
int ny = (j<j_center) ? 0 : ((j>j_center) ? 2 : 1);
acc[0][ny][nx][tidy][tidx] = d_pix_data[pix_index].l;
acc[1][ny][nx][tidy][tidx] = d_pix_data[pix_index].a;
acc[2][ny][nx][tidy][tidx] = d_pix_data[pix_index].b;
acc[3][ny][nx][tidy][tidx] = 1;
acc[4][ny][nx][tidy][tidx] = x;
acc[5][ny][nx][tidy][tidx] = y;
__syncthreads();
// Collapse over X and Y
int tid = tidy * blockDim.x + tidx;
for (int step=1; step<32*4; step *= 2)
{
if (tid % (2*step) == 0)
{
for (int ny=0; ny<3; ny++)
for (int nx=0; nx<3; nx++)
for (int c=0; c<6; c++)
*((int*)acc[c][ny][nx] + tid) += *((int*)acc[c][ny][nx] + tid + step);
}
__syncthreads();
}
// Is this ok? See https://stackoverflow.com/questions/6666382/can-i-use-syncthreads-after-having-dropped-threads
// TODO: Use these threads for nx, ny, c loop
if (tidy != 0) return;
// Now, acc[c][ny][nx][0][0] has the values we need
// but where do we write them to?
// Just one warp so no syncThreads (TODO)
if (tidx != 0) return;
for (int ny=0; ny<3; ny++)
{
int j = j_center + ny - 1;
if (j<0 || j>=spx_height) continue;
for (int nx=0; nx<3; nx++)
{
int i = i_center + nx - 1;
if (i<0 || i>=spx_width) continue;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), (int)acc[0][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[1]), (int)acc[1][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[2]), (int)acc[2][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[3]), (int)acc[3][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[4]), (int)acc[4][ny][nx][0][0]);
atomicAdd(&(d_spx_data[spx_index].accum[5]), (int)acc[5][ny][nx][0][0]);
}
}
}
__global__ void k_averaging(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].l = d_spx_data[spx_index].accum[0] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].a = d_spx_data[spx_index].accum[1] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].b = d_spx_data[spx_index].accum[2] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].x = d_spx_data[spx_index].accum[4] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].y = d_spx_data[spx_index].accum[5] / d_spx_data[spx_index].accum[3];
}
}
__global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
__shared__ spx_data spx[9 * 32];
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0)
{
int sh_idx = 0;
for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1
{
for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1
{
if (j < 0 || j >= spx_height || i < 0 || i >= spx_width)
{
sh_idx++;
continue;
}
int spx_index = j * spx_width + i;
// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
// printf("%i ::::: %i\n", spx_index, sh_idx);
spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x];
sh_idx++;
}
}
}
__syncthreads();
for(int i=0; i<9; i++)
{
int l_dist = l-(int)(spx[i + 8*blockIdx.x].l);
l_dist *= l_dist;
int a_dist = a-(int)(spx[i + 8*blockIdx.x].a);
a_dist *= a_dist;
int b_dist = b-(int)(spx[i + 8*blockIdx.x].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)spx[i + 8*blockIdx.x].x;
x_dist *= x_dist;
int y_dist = y-(int)spx[i + 8*blockIdx.x].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + i_sign[i]*window_size;
min_j = j_center + j_sign[i]*window_size;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
}
}
__global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int i = i_center - window_size; i <= i_center + window_size; i++)
{
if (i < 0 || i >= spx_width) continue;
for(int j = j_center - window_size; j <= j_center + window_size; j++)
{
if (j < 0 || j >= spx_height) continue;
int spx_index = j * spx_width + i;
int l_dist = l-(int)(d_spx_data[spx_index].l);
l_dist *= l_dist;
int a_dist = a-(int)(d_spx_data[spx_index].a);
a_dist *= a_dist;
int b_dist = b-(int)(d_spx_data[spx_index].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)d_spx_data[spx_index].x;
x_dist *= x_dist;
int y_dist = y-(int)d_spx_data[spx_index].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i;
min_j = j;
}
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_ownershipOpt2(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
__shared__ int spx[3][3][5]; // Y, X, LABXY
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
// Initialize SMEM
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int nx = tid % 3;
tid /= 3;
int ny = tid % 3;
tid /= 3;
if (tid < 5)
{
int value;
int i = i_center + nx - 1;
int j = j_center + ny - 1;
if (i<0 || i>=spx_width || j<0 || j>=spx_height)
{
value = -1;
}
else
{
int spx_index = j * spx_width + i;
const spx_data& spix = d_spx_data[spx_index];
switch(tid) //TODO:Get rid of it by using better data struct.?
{
case 0: value=spix.l; break;
case 1: value=spix.a; break;
case 2: value=spix.b; break;
case 3: value=spix.x; break;
case 4: value=spix.y; break;
}
}
spx[ny][nx][tid] = value;
}
__syncthreads();
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx)
{
int* spix = spx[ny][nx];
if (spix[0]==-1) continue;
int l_dist = l-spix[0];
l_dist *= l_dist;
int a_dist = a-spix[1];
a_dist *= a_dist;
int b_dist = b-spix[2];
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-spix[3];
x_dist *= x_dist;
int y_dist = y-spix[4];
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + nx - 1;
min_j = j_center + ny - 1;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_reset(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].accum[0] = 0;
d_spx_data[spx_index].accum[1] = 0;
d_spx_data[spx_index].accum[2] = 0;
d_spx_data[spx_index].accum[3] = 0;
d_spx_data[spx_index].accum[4] = 0;
d_spx_data[spx_index].accum[5] = 0;
}
}
|
b150bd63c2f2b3972db36ec86625dfde95110348.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "remove_occluded.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int size3 = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(remove_occluded, dim3(gridBlock), dim3(threadBlock), 0, 0, y, size, size3);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(remove_occluded, dim3(gridBlock), dim3(threadBlock), 0, 0, y, size, size3);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(remove_occluded, dim3(gridBlock), dim3(threadBlock), 0, 0, y, size, size3);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b150bd63c2f2b3972db36ec86625dfde95110348.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "remove_occluded.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
int size = XSIZE*YSIZE;
int size3 = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
remove_occluded<<<gridBlock,threadBlock>>>(y,size,size3);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
remove_occluded<<<gridBlock,threadBlock>>>(y,size,size3);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
remove_occluded<<<gridBlock,threadBlock>>>(y,size,size3);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2e14101331549c72b0296c0c59c0a39e079d1c94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_nn_vec_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[(128*16 + 32)*2 + 32*16*2 + 4];
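// Note: the parameter list matches a full 128x32-tile SGEMM interface, but the
// body below is only a placeholder that touches shared memory and writes one
// value per thread; it does not perform an actual matrix multiply.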
int tid = threadIdx.x;
share[tid] = 1;
__syncthreads(); // make the shared write visible before reading another thread's slot
param_C[tid] = share[127-tid];
}
| 2e14101331549c72b0296c0c59c0a39e079d1c94.cu | /*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_nn_vec_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[(128*16 + 32)*2 + 32*16*2 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[127-tid];
}
|
3bffa118248b2e5000d1c19d60c934f29fb6d4e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
__global__
void add(int n, float* x, float* y) {
int index = threadIdx.x;
int stride = blockDim.x;
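// Block-stride loop: with the single-block, 256-thread launch in main, each
// thread handles every 256th element until all N = 1 << 28 entries are processed.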
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 28;
float *x, *y;
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; ++i) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipLaunchKernelGGL(( add), dim3(1), dim3(256), 0, 0, N, x, y);
hipDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
hipFree(x);
hipFree(y);
return 0;
}
| 3bffa118248b2e5000d1c19d60c934f29fb6d4e0.cu | #include <iostream>
#include <cmath>
__global__
void add(int n, float* x, float* y) {
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
int main() {
int N = 1 << 28;
float *x, *y;
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; ++i) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<1, 256>>>(N, x, y);
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
cudaFree(x);
cudaFree(y);
return 0;
}
|
311f7955c34b2a4db896e12e6f15797675a9b66c.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
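// VAbsDiff4/VAbsDiff2 wrap the SIMD-in-a-word helpers vabsdiff4/vabsdiff2 from
// opencv2/core/cuda/simd_functions.hpp: a 32-bit word is treated as four packed
// 8-bit or two packed 16-bit lanes and the absolute difference is taken per
// lane, so one thread processes several pixels per 32-bit load.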
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 311f7955c34b2a4db896e12e6f15797675a9b66c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
22e2e564b1ee3732c95603653fca6e2d5b1c60e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h"
#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitSoADevice.h"
namespace testTrackingRecHitSoA {
template <typename TrackerTraits>
__global__ void fill(TrackingRecHitSoAView<TrackerTraits> soa) {
int i = threadIdx.x;
int j = blockIdx.x;
if (i == 0 and j == 0) {
soa.offsetBPIX2() = 22;
soa[10].xLocal() = 1.11;
}
soa[i].iphi() = i % 10;
soa.hitsLayerStart()[j] = j;
__syncthreads();
}
template <typename TrackerTraits>
__global__ void show(TrackingRecHitSoAView<TrackerTraits> soa) {
int i = threadIdx.x;
int j = blockIdx.x;
if (i == 0 and j == 0) {
printf("nbins = %d \n", soa.phiBinner().nbins());
printf("offsetBPIX %d ->%d \n", i, soa.offsetBPIX2());
printf("nHits %d ->%d \n", i, soa.nHits());
printf("hitsModuleStart %d ->%d \n", i, soa.hitsModuleStart().at(28));
}
if (i < 10) // can be increased to soa.nHits() for debugging
printf("iPhi %d ->%d \n", i, soa[i].iphi());
if (j * blockDim.x + i < 10) // can be increased to soa.phiBinner().nbins() for debugging
printf(">bin size %d ->%d \n", j * blockDim.x + i, soa.phiBinner().size(j * blockDim.x + i));
__syncthreads();
}
template <typename TrackerTraits>
void runKernels(TrackingRecHitSoADevice<TrackerTraits>& hits, hipStream_t stream) {
printf("> RUN!\n");
hipLaunchKernelGGL(( fill<TrackerTraits>), dim3(10), dim3(100), 0, stream, hits.view());
cudaCheck(hipDeviceSynchronize());
cms::cuda::fillManyFromVector(&(hits.view().phiBinner()),
10,
hits.view().iphi(),
hits.view().hitsLayerStart().data(),
2000,
256,
hits.view().phiBinnerStorage(),
stream);
cudaCheck(hipDeviceSynchronize());
hipLaunchKernelGGL(( show<TrackerTraits>), dim3(10), dim3(1000), 0, stream, hits.view());
cudaCheck(hipDeviceSynchronize());
}
template void runKernels<pixelTopology::Phase1>(TrackingRecHitSoADevice<pixelTopology::Phase1>& hits,
hipStream_t stream);
template void runKernels<pixelTopology::Phase2>(TrackingRecHitSoADevice<pixelTopology::Phase2>& hits,
hipStream_t stream);
} // namespace testTrackingRecHitSoA
| 22e2e564b1ee3732c95603653fca6e2d5b1c60e1.cu | #include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h"
#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitSoADevice.h"
namespace testTrackingRecHitSoA {
template <typename TrackerTraits>
__global__ void fill(TrackingRecHitSoAView<TrackerTraits> soa) {
int i = threadIdx.x;
int j = blockIdx.x;
if (i == 0 and j == 0) {
soa.offsetBPIX2() = 22;
soa[10].xLocal() = 1.11;
}
soa[i].iphi() = i % 10;
soa.hitsLayerStart()[j] = j;
__syncthreads();
}
template <typename TrackerTraits>
__global__ void show(TrackingRecHitSoAView<TrackerTraits> soa) {
int i = threadIdx.x;
int j = blockIdx.x;
if (i == 0 and j == 0) {
printf("nbins = %d \n", soa.phiBinner().nbins());
printf("offsetBPIX %d ->%d \n", i, soa.offsetBPIX2());
printf("nHits %d ->%d \n", i, soa.nHits());
printf("hitsModuleStart %d ->%d \n", i, soa.hitsModuleStart().at(28));
}
if (i < 10) // can be increased to soa.nHits() for debugging
printf("iPhi %d ->%d \n", i, soa[i].iphi());
if (j * blockDim.x + i < 10) // can be increased to soa.phiBinner().nbins() for debugging
printf(">bin size %d ->%d \n", j * blockDim.x + i, soa.phiBinner().size(j * blockDim.x + i));
__syncthreads();
}
template <typename TrackerTraits>
void runKernels(TrackingRecHitSoADevice<TrackerTraits>& hits, cudaStream_t stream) {
printf("> RUN!\n");
fill<TrackerTraits><<<10, 100, 0, stream>>>(hits.view());
cudaCheck(cudaDeviceSynchronize());
cms::cuda::fillManyFromVector(&(hits.view().phiBinner()),
10,
hits.view().iphi(),
hits.view().hitsLayerStart().data(),
2000,
256,
hits.view().phiBinnerStorage(),
stream);
cudaCheck(cudaDeviceSynchronize());
show<TrackerTraits><<<10, 1000, 0, stream>>>(hits.view());
cudaCheck(cudaDeviceSynchronize());
}
template void runKernels<pixelTopology::Phase1>(TrackingRecHitSoADevice<pixelTopology::Phase1>& hits,
cudaStream_t stream);
template void runKernels<pixelTopology::Phase2>(TrackingRecHitSoADevice<pixelTopology::Phase2>& hits,
cudaStream_t stream);
} // namespace testTrackingRecHitSoA
|
3b4216f3911119339da4d69c73502b80d51ab73f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel1_z_nonvector;
int xdim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim0_advec_mom_kernel1_z_nonvector;
int ydim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim1_advec_mom_kernel1_z_nonvector;
int xdim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim1_advec_mom_kernel1_z_nonvector;
int ydim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim2_advec_mom_kernel1_z_nonvector;
int xdim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim2_advec_mom_kernel1_z_nonvector;
int ydim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim3_advec_mom_kernel1_z_nonvector;
int xdim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim3_advec_mom_kernel1_z_nonvector;
int ydim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim4_advec_mom_kernel1_z_nonvector;
int xdim4_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim4_advec_mom_kernel1_z_nonvector;
int ydim4_advec_mom_kernel1_z_nonvector_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel1_z_nonvector*(y)+xdim0_advec_mom_kernel1_z_nonvector*ydim0_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel1_z_nonvector*(y)+xdim1_advec_mom_kernel1_z_nonvector*ydim1_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel1_z_nonvector*(y)+xdim2_advec_mom_kernel1_z_nonvector*ydim2_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel1_z_nonvector*(y)+xdim3_advec_mom_kernel1_z_nonvector*ydim3_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel1_z_nonvector*(y)+xdim4_advec_mom_kernel1_z_nonvector*ydim4_advec_mom_kernel1_z_nonvector*(z))
//user function
__device__
inline void advec_mom_kernel1_z_nonvector_gpu( const double *node_flux, const double *node_mass_pre,
double *mom_flux,
const double *celldz, const double *vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
if( (node_flux[OPS_ACC0(0,0,0)]) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
} else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
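// Donor-cell (upwind) selection in z: the sign of the nodal mass flux picks
// which neighboring cell donates momentum, plus the cells up- and downwind of
// it; sigma below is the fraction of the donor cell's pre-advection mass being
// fluxed this step. This appears to mirror the CloverLeaf advec_mom scheme.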
sigma = fabs(node_flux[OPS_ACC0(0,0,0)])/node_mass_pre[OPS_ACC1(0,0,donor)];
width = celldz[OPS_ACC3(0,0,0)];
vdiffuw = vel1[OPS_ACC4(0,0,donor)] - vel1[OPS_ACC4(0,0,upwind)];
vdiffdw = vel1[OPS_ACC4(0,0,downwind)] - vel1[OPS_ACC4(0,0,donor)];
limiter = 0.0;
if(vdiffuw*vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if(vdiffdw <= 0.0) wind = -1.0;
limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldz[OPS_ACC3(0,0,dif)])/6.0,MIN(auw,adw));
}
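// The limited slope adds a van Leer-type second-order correction to the donor
// velocity; clamping against MIN(auw, adw), and applying it only when the up-
// and downwind differences share a sign, prevents new extrema.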
advec_vel_temp= vel1[OPS_ACC4(0,0,donor)] + (1.0 - sigma) * limiter;
mom_flux[OPS_ACC2(0,0,0)] = advec_vel_temp * node_flux[OPS_ACC0(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel1_z_nonvector(
const double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim0_advec_mom_kernel1_z_nonvector * ydim0_advec_mom_kernel1_z_nonvector;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim1_advec_mom_kernel1_z_nonvector * ydim1_advec_mom_kernel1_z_nonvector;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim2_advec_mom_kernel1_z_nonvector * ydim2_advec_mom_kernel1_z_nonvector;
arg3 += idx_x * 0*1 + idx_y * 0*1 * xdim3_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim3_advec_mom_kernel1_z_nonvector * ydim3_advec_mom_kernel1_z_nonvector;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim4_advec_mom_kernel1_z_nonvector * ydim4_advec_mom_kernel1_z_nonvector;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel1_z_nonvector_gpu(arg0, arg1, arg2, arg3,
arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel1_z_nonvector_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,137)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel1_z_nonvector");
OPS_kernels[137].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel1_z_nonvector_h || ydim0 != ydim0_advec_mom_kernel1_z_nonvector_h || xdim1 != xdim1_advec_mom_kernel1_z_nonvector_h || ydim1 != ydim1_advec_mom_kernel1_z_nonvector_h || xdim2 != xdim2_advec_mom_kernel1_z_nonvector_h || ydim2 != ydim2_advec_mom_kernel1_z_nonvector_h || xdim3 != xdim3_advec_mom_kernel1_z_nonvector_h || ydim3 != ydim3_advec_mom_kernel1_z_nonvector_h || xdim4 != xdim4_advec_mom_kernel1_z_nonvector_h || ydim4 != ydim4_advec_mom_kernel1_z_nonvector_h) {
hipMemcpyToSymbol( xdim0_advec_mom_kernel1_z_nonvector, &xdim0, sizeof(int) );
xdim0_advec_mom_kernel1_z_nonvector_h = xdim0;
hipMemcpyToSymbol( ydim0_advec_mom_kernel1_z_nonvector, &ydim0, sizeof(int) );
ydim0_advec_mom_kernel1_z_nonvector_h = ydim0;
hipMemcpyToSymbol( xdim1_advec_mom_kernel1_z_nonvector, &xdim1, sizeof(int) );
xdim1_advec_mom_kernel1_z_nonvector_h = xdim1;
hipMemcpyToSymbol( ydim1_advec_mom_kernel1_z_nonvector, &ydim1, sizeof(int) );
ydim1_advec_mom_kernel1_z_nonvector_h = ydim1;
hipMemcpyToSymbol( xdim2_advec_mom_kernel1_z_nonvector, &xdim2, sizeof(int) );
xdim2_advec_mom_kernel1_z_nonvector_h = xdim2;
hipMemcpyToSymbol( ydim2_advec_mom_kernel1_z_nonvector, &ydim2, sizeof(int) );
ydim2_advec_mom_kernel1_z_nonvector_h = ydim2;
hipMemcpyToSymbol( xdim3_advec_mom_kernel1_z_nonvector, &xdim3, sizeof(int) );
xdim3_advec_mom_kernel1_z_nonvector_h = xdim3;
hipMemcpyToSymbol( ydim3_advec_mom_kernel1_z_nonvector, &ydim3, sizeof(int) );
ydim3_advec_mom_kernel1_z_nonvector_h = ydim3;
hipMemcpyToSymbol( xdim4_advec_mom_kernel1_z_nonvector, &xdim4, sizeof(int) );
xdim4_advec_mom_kernel1_z_nonvector_h = xdim4;
hipMemcpyToSymbol( ydim4_advec_mom_kernel1_z_nonvector, &ydim4, sizeof(int) );
ydim4_advec_mom_kernel1_z_nonvector_h = ydim4;
}
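// The per-dat sizes are cached in the *_h shadow variables so the
// hipMemcpyToSymbol uploads above run only when a dataset's size has changed
// since the previous call.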
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel1_z_nonvector), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[137].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 137;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 137;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel1_z_nonvector_execute;
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel1_z_nonvector");
}
ops_enqueue_kernel(desc);
}
#endif
| 3b4216f3911119339da4d69c73502b80d51ab73f.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel1_z_nonvector;
int xdim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim0_advec_mom_kernel1_z_nonvector;
int ydim0_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim1_advec_mom_kernel1_z_nonvector;
int xdim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim1_advec_mom_kernel1_z_nonvector;
int ydim1_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim2_advec_mom_kernel1_z_nonvector;
int xdim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim2_advec_mom_kernel1_z_nonvector;
int ydim2_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim3_advec_mom_kernel1_z_nonvector;
int xdim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim3_advec_mom_kernel1_z_nonvector;
int ydim3_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int xdim4_advec_mom_kernel1_z_nonvector;
int xdim4_advec_mom_kernel1_z_nonvector_h = -1;
__constant__ int ydim4_advec_mom_kernel1_z_nonvector;
int ydim4_advec_mom_kernel1_z_nonvector_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel1_z_nonvector*(y)+xdim0_advec_mom_kernel1_z_nonvector*ydim0_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel1_z_nonvector*(y)+xdim1_advec_mom_kernel1_z_nonvector*ydim1_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel1_z_nonvector*(y)+xdim2_advec_mom_kernel1_z_nonvector*ydim2_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel1_z_nonvector*(y)+xdim3_advec_mom_kernel1_z_nonvector*ydim3_advec_mom_kernel1_z_nonvector*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel1_z_nonvector*(y)+xdim4_advec_mom_kernel1_z_nonvector*ydim4_advec_mom_kernel1_z_nonvector*(z))
//user function
__device__
inline void advec_mom_kernel1_z_nonvector_gpu( const double *node_flux, const double *node_mass_pre,
double *mom_flux,
const double *celldz, const double *vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
if( (node_flux[OPS_ACC0(0,0,0)]) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
} else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
sigma = fabs(node_flux[OPS_ACC0(0,0,0)])/node_mass_pre[OPS_ACC1(0,0,donor)];
width = celldz[OPS_ACC3(0,0,0)];
vdiffuw = vel1[OPS_ACC4(0,0,donor)] - vel1[OPS_ACC4(0,0,upwind)];
vdiffdw = vel1[OPS_ACC4(0,0,downwind)] - vel1[OPS_ACC4(0,0,donor)];
limiter = 0.0;
if(vdiffuw*vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if(vdiffdw <= 0.0) wind = -1.0;
limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldz[OPS_ACC3(0,0,dif)])/6.0,MIN(auw,adw));
}
advec_vel_temp= vel1[OPS_ACC4(0,0,donor)] + (1.0 - sigma) * limiter;
mom_flux[OPS_ACC2(0,0,0)] = advec_vel_temp * node_flux[OPS_ACC0(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
__global__ void ops_advec_mom_kernel1_z_nonvector(
const double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim0_advec_mom_kernel1_z_nonvector * ydim0_advec_mom_kernel1_z_nonvector;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim1_advec_mom_kernel1_z_nonvector * ydim1_advec_mom_kernel1_z_nonvector;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim2_advec_mom_kernel1_z_nonvector * ydim2_advec_mom_kernel1_z_nonvector;
arg3 += idx_x * 0*1 + idx_y * 0*1 * xdim3_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim3_advec_mom_kernel1_z_nonvector * ydim3_advec_mom_kernel1_z_nonvector;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel1_z_nonvector + idx_z * 1*1 * xdim4_advec_mom_kernel1_z_nonvector * ydim4_advec_mom_kernel1_z_nonvector;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel1_z_nonvector_gpu(arg0, arg1, arg2, arg3,
arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel1_z_nonvector_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,137)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel1_z_nonvector");
OPS_kernels[137].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != xdim0_advec_mom_kernel1_z_nonvector_h || ydim0 != ydim0_advec_mom_kernel1_z_nonvector_h || xdim1 != xdim1_advec_mom_kernel1_z_nonvector_h || ydim1 != ydim1_advec_mom_kernel1_z_nonvector_h || xdim2 != xdim2_advec_mom_kernel1_z_nonvector_h || ydim2 != ydim2_advec_mom_kernel1_z_nonvector_h || xdim3 != xdim3_advec_mom_kernel1_z_nonvector_h || ydim3 != ydim3_advec_mom_kernel1_z_nonvector_h || xdim4 != xdim4_advec_mom_kernel1_z_nonvector_h || ydim4 != ydim4_advec_mom_kernel1_z_nonvector_h) {
cudaMemcpyToSymbol( xdim0_advec_mom_kernel1_z_nonvector, &xdim0, sizeof(int) );
xdim0_advec_mom_kernel1_z_nonvector_h = xdim0;
cudaMemcpyToSymbol( ydim0_advec_mom_kernel1_z_nonvector, &ydim0, sizeof(int) );
ydim0_advec_mom_kernel1_z_nonvector_h = ydim0;
cudaMemcpyToSymbol( xdim1_advec_mom_kernel1_z_nonvector, &xdim1, sizeof(int) );
xdim1_advec_mom_kernel1_z_nonvector_h = xdim1;
cudaMemcpyToSymbol( ydim1_advec_mom_kernel1_z_nonvector, &ydim1, sizeof(int) );
ydim1_advec_mom_kernel1_z_nonvector_h = ydim1;
cudaMemcpyToSymbol( xdim2_advec_mom_kernel1_z_nonvector, &xdim2, sizeof(int) );
xdim2_advec_mom_kernel1_z_nonvector_h = xdim2;
cudaMemcpyToSymbol( ydim2_advec_mom_kernel1_z_nonvector, &ydim2, sizeof(int) );
ydim2_advec_mom_kernel1_z_nonvector_h = ydim2;
cudaMemcpyToSymbol( xdim3_advec_mom_kernel1_z_nonvector, &xdim3, sizeof(int) );
xdim3_advec_mom_kernel1_z_nonvector_h = xdim3;
cudaMemcpyToSymbol( ydim3_advec_mom_kernel1_z_nonvector, &ydim3, sizeof(int) );
ydim3_advec_mom_kernel1_z_nonvector_h = ydim3;
cudaMemcpyToSymbol( xdim4_advec_mom_kernel1_z_nonvector, &xdim4, sizeof(int) );
xdim4_advec_mom_kernel1_z_nonvector_h = xdim4;
cudaMemcpyToSymbol( ydim4_advec_mom_kernel1_z_nonvector, &ydim4, sizeof(int) );
ydim4_advec_mom_kernel1_z_nonvector_h = ydim4;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
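  //each base flattens the 3D start index into a byte offset:
  //base_offset + elem_size*(x*stride[0] + size[0]*y*stride[1] + size[0]*size[1]*z*stride[2])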
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel1_z_nonvector<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[137].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[137].mpi_time += t2-t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_z_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 137;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 137;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel1_z_nonvector_execute;
if (OPS_diags > 1) {
ops_timing_realloc(137,"advec_mom_kernel1_z_nonvector");
}
ops_enqueue_kernel(desc);
}
#endif
|
5b31d98dfbba62832000796473c09f73859d261a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, out=0.0f;
float b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f;
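  // t2_*/b2_* carry partial sums of the first smoothing stage (fed from __tilevar_0__),
  // t3_*/b3_* carry partial sums of the second stage (fed from __tilevar_1__);
  // together they form a per-thread register pipeline rotated by one row per iteration.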
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
  //Initialize: compute this thread's column index and zero the first-stage accumulator row __tilevar_1__
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
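  // Prime the pipeline over rows __iter_y__-4 .. __iter_y__+3; no output is written yet.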
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
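    // b2_1 is flushed into __tilevar_1__ for the second stage, b3_1 becomes the next
    // output value, and the newest slots (t2_1, t3_1) are cleared for the following row.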
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
out=b3_1; b3_1=b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
}
// Rest of the computation
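  // Steady state: same accumulation as above, but each iteration now emits one
  // finished output row of __var_1__ at row index __iter_1__-4.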
for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
out=b3_1; b3_1=b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
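/* gaussian(): copies h_input to the device unless it already resides there,
   launches the fused stencil kernel twice (input -> __var_2__ -> __var_1__),
   and copies the final result back into __var_0__. */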
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| 5b31d98dfbba62832000796473c09f73859d261a.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, out=0.0f;
float b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f;
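  // t2_*/b2_* carry partial sums of the first smoothing stage (fed from __tilevar_0__),
  // t3_*/b3_* carry partial sums of the second stage (fed from __tilevar_1__);
  // together they form a per-thread register pipeline rotated by one row per iteration.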
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-8);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
  //Initialize: compute this thread's column index and zero the first-stage accumulator row __tilevar_1__
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
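  // Prime the pipeline over rows __iter_y__-4 .. __iter_y__+3; no output is written yet.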
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-4); __iter_1__ <= __iter_y__+3; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
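    // b2_1 is flushed into __tilevar_1__ for the second stage, b3_1 becomes the next
    // output value, and the newest slots (t2_1, t3_1) are cleared for the following row.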
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
out=b3_1; b3_1=b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
}
// Rest of the computation
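  // Steady state: same accumulation as above, but each iteration now emits one
  // finished output row of __var_1__ at row index __iter_1__-4.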
for (int __iter_1__ = __iter_y__+4; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+3); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-4,0)] = out;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
out=b3_1; b3_1=b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
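/* gaussian(): copies h_input to the device unless it already resides there,
   launches the fused stencil kernel twice (input -> __var_2__ -> __var_1__),
   and copies the final result back into __var_0__. */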
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
ca446e7986434759f2c33c0efc1d7ada168766bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "monolithic.h"
#include "hip/hip_fp16.h"
template <typename mat_type>
void init_matrix(mat_type *mat, int m, int n, int rs, int cs)
{
for (int i = 0; i<m; i++)
for (int j = 0; j<n; j++)
mat[i*rs + j*cs] = (mat_type) (rand() % 3);
}
template <typename mat_type>
void print_matrix(mat_type *mat, const char *name, int m, int n, int rs, int cs)
{
cout << name << " = [ \n";
for (int i = 0; i<m; i++) {
for (int j = 0; j<n; j++)
{
if (std::is_same<mat_type, half>::value)
std::cout << (float) __half2float(mat[i*rs + j*cs]) << " ";
else
std::cout << (float) mat[i*rs + j*cs] << " ";
}
cout << endl;
}
cout << " ]; " << endl;
}
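// max_abs_diff keeps a running maximum in *diff, so once that maximum exceeds
// max_err every subsequently visited element also increments wrong_count.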
template <typename mat_type>
int max_abs_diff(mat_type *ref, mat_type *result, int m, int n, double *diff, double max_err)
{
int wrong_count = 0;
for(int i = 0; i < m; i++)
for(int j = 0; j < n; j++) {
*diff = max(*diff, dabs(ref[i*m + j] - result[i*m + j]));
if (*diff > max_err)
wrong_count++;
}
return wrong_count;
}
template void init_matrix<half> (half *mat, int m, int n, int rs, int cs);
template void init_matrix<float> (float *mat, int m, int n, int rs, int cs);
template void print_matrix<half> (half *mat, const char *name, int m, int n, int rs, int cs);
template void print_matrix<float> (float *mat, const char *name, int m, int n, int rs, int cs);
template int max_abs_diff<float>(float *ref, float *result, int m, int n, double *diff, double max_err); | ca446e7986434759f2c33c0efc1d7ada168766bb.cu | #include "monolithic.h"
#include "cuda_fp16.h"
template <typename mat_type>
void init_matrix(mat_type *mat, int m, int n, int rs, int cs)
{
for (int i = 0; i<m; i++)
for (int j = 0; j<n; j++)
mat[i*rs + j*cs] = (mat_type) (rand() % 3);
}
template <typename mat_type>
void print_matrix(mat_type *mat, const char *name, int m, int n, int rs, int cs)
{
cout << name << " = [ \n";
for (int i = 0; i<m; i++) {
for (int j = 0; j<n; j++)
{
if (std::is_same<mat_type, half>::value)
std::cout << (float) __half2float(mat[i*rs + j*cs]) << " ";
else
std::cout << (float) mat[i*rs + j*cs] << " ";
}
cout << endl;
}
cout << " ]; " << endl;
}
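// max_abs_diff keeps a running maximum in *diff, so once that maximum exceeds
// max_err every subsequently visited element also increments wrong_count.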
template <typename mat_type>
int max_abs_diff(mat_type *ref, mat_type *result, int m, int n, double *diff, double max_err)
{
int wrong_count = 0;
for(int i = 0; i < m; i++)
for(int j = 0; j < n; j++) {
*diff = max(*diff, dabs(ref[i*m + j] - result[i*m + j]));
if (*diff > max_err)
wrong_count++;
}
return wrong_count;
}
template void init_matrix<half> (half *mat, int m, int n, int rs, int cs);
template void init_matrix<float> (float *mat, int m, int n, int rs, int cs);
template void print_matrix<half> (half *mat, const char *name, int m, int n, int rs, int cs);
template void print_matrix<float> (float *mat, const char *name, int m, int n, int rs, int cs);
template int max_abs_diff<float>(float *ref, float *result, int m, int n, double *diff, double max_err); |
7a2fadf1771fda69474f8dd312965d7303d5ebe0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wall_helpers.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/folders.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/walls/simple_stationary_wall.h>
#include <mirheo/core/xdmf/type_map.h>
#include <mirheo/core/xdmf/xdmf.h>
#include <hiprand/hiprand_kernel.h>
namespace mirheo
{
namespace wall_helpers_kernels
{
__global__ void init_sdf(int n, real *sdfs, real val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) sdfs[i] = val;
}
__global__ void merge_sdfs(int n, const real *sdfs, real *sdfs_merged)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) sdfs_merged[i] = max(sdfs[i], sdfs_merged[i]);
}
template<bool QUERY>
__global__ void collectFrozen(PVview view, const real *sdfs, real minVal, real maxVal,
real4 *frozenPos, real4 *frozenVel, int *nFrozen)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Particle p(view.readParticle(pid));
p.u = make_real3(0);
const real val = sdfs[pid];
if (val > minVal && val < maxVal)
{
const int ind = atomicAggInc(nFrozen);
if (!QUERY)
p.write2Real4(frozenPos, frozenVel, ind);
}
}
__global__ void initRandomPositions(int n, real3 *positions, long seed, real3 localSize)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
hiprandState_t state;
real3 r;
hiprand_init(seed, i, 0, &state);
r.x = localSize.x * (hiprand_uniform(&state) - 0.5_r);
r.y = localSize.y * (hiprand_uniform(&state) - 0.5_r);
r.z = localSize.z * (hiprand_uniform(&state) - 0.5_r);
positions[i] = r;
}
__global__ void countInside(int n, const real *sdf, int *nInside, real threshold = 0._r)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
int myval = 0;
if (i < n)
myval = sdf[i] < threshold;
myval = warpReduce(myval, [] (int a, int b) {return a + b;});
if (laneId() == 0)
atomicAdd(nInside, myval);
}
} // namespace wall_helpers_kernels
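// Two-pass extraction: first count particles whose SDF value falls in (minVal, maxVal), then resize the particle vector and collect them.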
static void extract_particles(ParticleVector *pv, const real *sdfs, real minVal, real maxVal)
{
PinnedBuffer<int> nFrozen(1);
PVview view(pv, pv->local());
const int nthreads = 128;
const int nblocks = getNblocks(view.size, nthreads);
nFrozen.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::collectFrozen<true>,
nblocks, nthreads, 0, defaultStream,
view, sdfs, minVal, maxVal, nullptr, nullptr, nFrozen.devPtr());
nFrozen.downloadFromDevice(defaultStream);
PinnedBuffer<real4> frozenPos(nFrozen[0]), frozenVel(nFrozen[0]);
info("Freezing %d particles", nFrozen[0]);
pv->local()->resize(nFrozen[0], defaultStream);
nFrozen.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::collectFrozen<false>,
nblocks, nthreads, 0, defaultStream,
view, sdfs, minVal, maxVal, frozenPos.devPtr(), frozenVel.devPtr(), nFrozen.devPtr());
CUDA_Check( hipDeviceSynchronize() );
std::swap(frozenPos, pv->local()->positions());
std::swap(frozenVel, pv->local()->velocities());
}
void wall_helpers::freezeParticlesInWall(SDFBasedWall *wall, ParticleVector *pv, real minVal, real maxVal)
{
CUDA_Check( hipDeviceSynchronize() );
DeviceBuffer<real> sdfs(pv->local()->size());
wall->sdfPerParticle(pv->local(), &sdfs, nullptr, 0, defaultStream);
extract_particles(pv, sdfs.devPtr(), minVal, maxVal);
}
void wall_helpers::freezeParticlesInWalls(std::vector<SDFBasedWall*> walls, ParticleVector *pv, real minVal, real maxVal)
{
CUDA_Check( hipDeviceSynchronize() );
int n = pv->local()->size();
DeviceBuffer<real> sdfs(n), sdfs_merged(n);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real safety = 1._r;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), minVal - safety);
for (auto& wall : walls) {
wall->sdfPerParticle(pv->local(), &sdfs, nullptr, 0, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
extract_particles(pv, sdfs_merged.devPtr(), minVal, maxVal);
}
void wall_helpers::dumpWalls2XDMF(std::vector<SDFBasedWall*> walls, real3 gridH, DomainInfo domain, std::string filename, MPI_Comm cartComm)
{
CUDA_Check( hipDeviceSynchronize() );
CellListInfo gridInfo(gridH, domain.localSize);
const int n = gridInfo.totcells;
DeviceBuffer<real> sdfs(n);
PinnedBuffer<real> sdfs_merged(n);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real initial = -1e5;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), initial);
for (auto& wall : walls)
{
wall->sdfOnGrid(gridH, &sdfs, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
sdfs_merged.downloadFromDevice(defaultStream);
auto path = getParentPath(filename);
if (path != "")
createFoldersCollective(cartComm, path);
XDMF::UniformGrid grid(gridInfo.ncells, gridInfo.h, cartComm);
XDMF::Channel sdfCh {"sdf", sdfs_merged.hostPtr(),
XDMF::Channel::DataForm::Scalar,
XDMF::getNumberType<real>(),
DataTypeWrapper<real>(),
XDMF::Channel::NeedShift::False};
XDMF::write(filename, &grid, {sdfCh}, cartComm);
}
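// Monte-Carlo volume estimate: sample random points in the local domain, count those with a negative merged SDF, and reduce across ranks.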
double wall_helpers::volumeInsideWalls(std::vector<SDFBasedWall*> walls, DomainInfo domain, MPI_Comm comm, long nSamplesPerRank)
{
long n = nSamplesPerRank;
DeviceBuffer<real3> positions(n);
DeviceBuffer<real> sdfs(n), sdfs_merged(n);
PinnedBuffer<int> nInside(1);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real initial = -1e5;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::initRandomPositions,
nblocks, nthreads, 0, defaultStream,
n, positions.devPtr(), 424242, domain.localSize);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), initial);
for (auto& wall : walls) {
wall->sdfPerPosition(&positions, &sdfs, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
nInside.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::countInside,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), nInside.devPtr());
nInside.downloadFromDevice(defaultStream, ContainersSynch::Synch);
real3 localSize = domain.localSize;
double subDomainVolume = localSize.x * localSize.y * localSize.z;
double locVolume = (double) nInside[0] / (double) n * subDomainVolume;
double totVolume = 0;
MPI_Check( MPI_Allreduce(&locVolume, &totVolume, 1, MPI_DOUBLE, MPI_SUM, comm) );
return totVolume;
}
} // namespace mirheo
| 7a2fadf1771fda69474f8dd312965d7303d5ebe0.cu | #include "wall_helpers.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/folders.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/walls/simple_stationary_wall.h>
#include <mirheo/core/xdmf/type_map.h>
#include <mirheo/core/xdmf/xdmf.h>
#include <curand_kernel.h>
namespace mirheo
{
namespace wall_helpers_kernels
{
__global__ void init_sdf(int n, real *sdfs, real val)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) sdfs[i] = val;
}
__global__ void merge_sdfs(int n, const real *sdfs, real *sdfs_merged)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) sdfs_merged[i] = max(sdfs[i], sdfs_merged[i]);
}
template<bool QUERY>
__global__ void collectFrozen(PVview view, const real *sdfs, real minVal, real maxVal,
real4 *frozenPos, real4 *frozenVel, int *nFrozen)
{
const int pid = blockIdx.x * blockDim.x + threadIdx.x;
if (pid >= view.size) return;
Particle p(view.readParticle(pid));
p.u = make_real3(0);
const real val = sdfs[pid];
if (val > minVal && val < maxVal)
{
const int ind = atomicAggInc(nFrozen);
if (!QUERY)
p.write2Real4(frozenPos, frozenVel, ind);
}
}
__global__ void initRandomPositions(int n, real3 *positions, long seed, real3 localSize)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
curandState_t state;
real3 r;
curand_init(seed, i, 0, &state);
r.x = localSize.x * (curand_uniform(&state) - 0.5_r);
r.y = localSize.y * (curand_uniform(&state) - 0.5_r);
r.z = localSize.z * (curand_uniform(&state) - 0.5_r);
positions[i] = r;
}
__global__ void countInside(int n, const real *sdf, int *nInside, real threshold = 0._r)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
int myval = 0;
if (i < n)
myval = sdf[i] < threshold;
myval = warpReduce(myval, [] (int a, int b) {return a + b;});
if (laneId() == 0)
atomicAdd(nInside, myval);
}
} // namespace wall_helpers_kernels
static void extract_particles(ParticleVector *pv, const real *sdfs, real minVal, real maxVal)
{
PinnedBuffer<int> nFrozen(1);
PVview view(pv, pv->local());
const int nthreads = 128;
const int nblocks = getNblocks(view.size, nthreads);
nFrozen.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::collectFrozen<true>,
nblocks, nthreads, 0, defaultStream,
view, sdfs, minVal, maxVal, nullptr, nullptr, nFrozen.devPtr());
nFrozen.downloadFromDevice(defaultStream);
PinnedBuffer<real4> frozenPos(nFrozen[0]), frozenVel(nFrozen[0]);
info("Freezing %d particles", nFrozen[0]);
pv->local()->resize(nFrozen[0], defaultStream);
nFrozen.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::collectFrozen<false>,
nblocks, nthreads, 0, defaultStream,
view, sdfs, minVal, maxVal, frozenPos.devPtr(), frozenVel.devPtr(), nFrozen.devPtr());
CUDA_Check( cudaDeviceSynchronize() );
std::swap(frozenPos, pv->local()->positions());
std::swap(frozenVel, pv->local()->velocities());
}
void wall_helpers::freezeParticlesInWall(SDFBasedWall *wall, ParticleVector *pv, real minVal, real maxVal)
{
CUDA_Check( cudaDeviceSynchronize() );
DeviceBuffer<real> sdfs(pv->local()->size());
wall->sdfPerParticle(pv->local(), &sdfs, nullptr, 0, defaultStream);
extract_particles(pv, sdfs.devPtr(), minVal, maxVal);
}
void wall_helpers::freezeParticlesInWalls(std::vector<SDFBasedWall*> walls, ParticleVector *pv, real minVal, real maxVal)
{
CUDA_Check( cudaDeviceSynchronize() );
int n = pv->local()->size();
DeviceBuffer<real> sdfs(n), sdfs_merged(n);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real safety = 1._r;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), minVal - safety);
for (auto& wall : walls) {
wall->sdfPerParticle(pv->local(), &sdfs, nullptr, 0, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
extract_particles(pv, sdfs_merged.devPtr(), minVal, maxVal);
}
void wall_helpers::dumpWalls2XDMF(std::vector<SDFBasedWall*> walls, real3 gridH, DomainInfo domain, std::string filename, MPI_Comm cartComm)
{
CUDA_Check( cudaDeviceSynchronize() );
CellListInfo gridInfo(gridH, domain.localSize);
const int n = gridInfo.totcells;
DeviceBuffer<real> sdfs(n);
PinnedBuffer<real> sdfs_merged(n);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real initial = -1e5;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), initial);
for (auto& wall : walls)
{
wall->sdfOnGrid(gridH, &sdfs, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
sdfs_merged.downloadFromDevice(defaultStream);
auto path = getParentPath(filename);
if (path != "")
createFoldersCollective(cartComm, path);
XDMF::UniformGrid grid(gridInfo.ncells, gridInfo.h, cartComm);
XDMF::Channel sdfCh {"sdf", sdfs_merged.hostPtr(),
XDMF::Channel::DataForm::Scalar,
XDMF::getNumberType<real>(),
DataTypeWrapper<real>(),
XDMF::Channel::NeedShift::False};
XDMF::write(filename, &grid, {sdfCh}, cartComm);
}
double wall_helpers::volumeInsideWalls(std::vector<SDFBasedWall*> walls, DomainInfo domain, MPI_Comm comm, long nSamplesPerRank)
{
long n = nSamplesPerRank;
DeviceBuffer<real3> positions(n);
DeviceBuffer<real> sdfs(n), sdfs_merged(n);
PinnedBuffer<int> nInside(1);
const int nthreads = 128;
const int nblocks = getNblocks(n, nthreads);
const real initial = -1e5;
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::initRandomPositions,
nblocks, nthreads, 0, defaultStream,
n, positions.devPtr(), 424242, domain.localSize);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::init_sdf,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), initial);
for (auto& wall : walls) {
wall->sdfPerPosition(&positions, &sdfs, defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::merge_sdfs,
nblocks, nthreads, 0, defaultStream,
n, sdfs.devPtr(), sdfs_merged.devPtr());
}
nInside.clear(defaultStream);
SAFE_KERNEL_LAUNCH(
wall_helpers_kernels::countInside,
nblocks, nthreads, 0, defaultStream,
n, sdfs_merged.devPtr(), nInside.devPtr());
nInside.downloadFromDevice(defaultStream, ContainersSynch::Synch);
real3 localSize = domain.localSize;
double subDomainVolume = localSize.x * localSize.y * localSize.z;
double locVolume = (double) nInside[0] / (double) n * subDomainVolume;
double totVolume = 0;
MPI_Check( MPI_Allreduce(&locVolume, &totVolume, 1, MPI_DOUBLE, MPI_SUM, comm) );
return totVolume;
}
} // namespace mirheo
|
057ff2e5be4434d440023ea6393e15cffe8b7bac.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cstdlib>
#include <ctime>
#include <thrust/scan.h>
using namespace std;
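// Work-inefficient Hillis-Steele exclusive scan: double-buffered shared memory, one element per thread (N <= 1024).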
__global__
void prefixScan1(int *in,int *out, int N)
{
__shared__ int temp[2048];
int threadId = threadIdx.x;
int pout = 0, pin = 1;
//load input into shared memory
//exclusive scan, so shift right by one and set first element to 0
temp[threadId] = (threadId > 0) ? in[threadId - 1] : 0;
__syncthreads();
for(int offset = 1; offset < N; offset *= 2)
{
//swap double buffer
pout = 1 - pout;
pin = 1 - pin;
if(threadId >= offset)
temp[pout * N + threadId] = temp[pin * N + threadId] + temp[pin * N + threadId - offset];
else
temp[pout * N + threadId] = temp[pin * N + threadId];
__syncthreads();
}
out[threadId] = temp[pout * N + threadId];
}
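// Work-efficient Blelloch exclusive scan: up-sweep (reduce) then down-sweep, two elements per thread (n <= 2048).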
__global__
void prefixScan2(int *in, int *out, int n)
{
__shared__ int temp[2048];
int threadId = threadIdx.x;
int offset = 1;
//load input into shared memory
temp[2 * threadId] = in[2 * threadId];
temp[2 * threadId + 1] = in[2 * threadId + 1];
__syncthreads();
for(int d = n/2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if(threadId == 0) // clear the last element
temp[n-1] = 0;
for(int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
out[2 * threadId] = temp[2 * threadId];
out[2 * threadId + 1] = temp[2 * threadId + 1];
}
int main()
{
int *h_in,*h_out,*d_in,*d_out;
#ifdef SCAN1
int N = 1024;
#else
int N = 2048;
#endif
h_in = (int*)malloc(sizeof(int) * N);
h_out = (int*)malloc(sizeof(int) * N);
hipMalloc(&d_in,sizeof(int) * N);
hipMalloc(&d_out,sizeof(int) * N);
srand(time(NULL));
for(int i = 0; i < N; ++i)
h_in[i] = rand() % 5;
hipMemcpy(d_in,h_in,sizeof(int) * N, hipMemcpyHostToDevice);
dim3 grid(1);
dim3 block(1024);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float ElapsedTime;
float sumTime = 0;
for(int i = 0; i < 100; ++i)
{
hipEventRecord(start,0);
#ifdef SCAN1
hipLaunchKernelGGL(( prefixScan1), dim3(grid),dim3(block), 0, 0, d_in,d_out,N);
#elif defined(SCAN2)
hipLaunchKernelGGL(( prefixScan2), dim3(grid),dim3(block), 0, 0, d_in,d_out,N);
#endif
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&ElapsedTime,start,stop);
sumTime += ElapsedTime;
}
printf("average execution time %.3f ms\n ",sumTime / 100);
hipMemcpy(h_out,d_out,sizeof(int) * N, hipMemcpyDeviceToHost);
thrust::exclusive_scan(h_in,h_in + N,h_in);
bool success = true;
for(int i = 0; i < N; ++i)
{
if(h_out[i] != h_in[i])
{
success = false;
break;
}
}
if(success)
printf("Success!\n");
else
printf("Error!\n");
free(h_in);
free(h_out);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 057ff2e5be4434d440023ea6393e15cffe8b7bac.cu | #include <iostream>
#include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cstdlib>
#include <ctime>
#include <thrust/scan.h>
using namespace std;
__global__
void prefixScan1(int *in,int *out, int N)
{
__shared__ int temp[2048];
int threadId = threadIdx.x;
int pout = 0, pin = 1;
//load input into shared memory
//exclusive scan, so shift right by one and set first element to 0
temp[threadId] = (threadId > 0) ? in[threadId - 1] : 0;
__syncthreads();
for(int offset = 1; offset < N; offset *= 2)
{
//swap double buffer
pout = 1 - pout;
pin = 1 - pin;
if(threadId >= offset)
temp[pout * N + threadId] = temp[pin * N + threadId] + temp[pin * N + threadId - offset];
else
temp[pout * N + threadId] = temp[pin * N + threadId];
__syncthreads();
}
out[threadId] = temp[pout * N + threadId];
}
__global__
void prefixScan2(int *in, int *out, int n)
{
__shared__ int temp[2048];
int threadId = threadIdx.x;
int offset = 1;
//load input into shared memory
temp[2 * threadId] = in[2 * threadId];
temp[2 * threadId + 1] = in[2 * threadId + 1];
__syncthreads();
for(int d = n/2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if(threadId == 0) // clear the last element
temp[n-1] = 0;
for(int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if(threadId < d)
{
int ai = offset * (2 * threadId + 1) - 1;
int bi = offset * (2 * threadId + 2) - 1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
out[2 * threadId] = temp[2 * threadId];
out[2 * threadId + 1] = temp[2 * threadId + 1];
}
int main()
{
int *h_in,*h_out,*d_in,*d_out;
#ifdef SCAN1
int N = 1024;
#else
int N = 2048;
#endif
h_in = (int*)malloc(sizeof(int) * N);
h_out = (int*)malloc(sizeof(int) * N);
cudaMalloc(&d_in,sizeof(int) * N);
cudaMalloc(&d_out,sizeof(int) * N);
srand(time(NULL));
for(int i = 0; i < N; ++i)
h_in[i] = rand() % 5;
cudaMemcpy(d_in,h_in,sizeof(int) * N, cudaMemcpyHostToDevice);
dim3 grid(1);
dim3 block(1024);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ElapsedTime;
float sumTime = 0;
for(int i = 0; i < 100; ++i)
{
cudaEventRecord(start,0);
#ifdef SCAN1
prefixScan1<<<grid,block>>>(d_in,d_out,N);
#elif defined(SCAN2)
prefixScan2<<<grid,block>>>(d_in,d_out,N);
#endif
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ElapsedTime,start,stop);
sumTime += ElapsedTime;
}
printf("average execution time %.3f ms\n ",sumTime / 100);
cudaMemcpy(h_out,d_out,sizeof(int) * N, cudaMemcpyDeviceToHost);
thrust::exclusive_scan(h_in,h_in + N,h_in);
bool success = true;
for(int i = 0; i < N; ++i)
{
if(h_out[i] != h_in[i])
{
success = false;
break;
}
}
if(success)
printf("Success!\n");
else
printf("Error!\n");
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
7b1b866bdad819586d69b8136263e8da4eae5fbe.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <functions/linearReg.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
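// Computes linear-regression loss and gradients on the GPU for NONE, L1, L2 and ELASTICNET penalties and checks them against precomputed reference values.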
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols,
stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso,
penalty::L1, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio,
cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge,
penalty::L2, alpha, l1_ratio, cublas_handle, allocator,
stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio,
cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
cublas_handle, allocator, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| 7b1b866bdad819586d69b8136263e8da4eae5fbe.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <functions/linearReg.cuh>
#include <random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
allocate(in, len);
allocate(out, 1);
allocate(out_lasso, 1);
allocate(out_ridge, 1);
allocate(out_elasticnet, 1);
allocate(out_grad, n_cols);
allocate(out_lasso_grad, n_cols);
allocate(out_ridge_grad, n_cols);
allocate(out_elasticnet_grad, n_cols);
allocate(out_ref, 1);
allocate(out_lasso_ref, 1);
allocate(out_ridge_ref, 1);
allocate(out_elasticnet_ref, 1);
allocate(out_grad_ref, n_cols);
allocate(out_lasso_grad_ref, n_cols);
allocate(out_ridge_grad_ref, n_cols);
allocate(out_elasticnet_grad_ref, n_cols);
allocate(labels, params.n_rows);
allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
updateDevice(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
updateDevice(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
updateDevice(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
updateDevice(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
updateDevice(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
updateDevice(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
updateDevice(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
updateDevice(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
updateDevice(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
updateDevice(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
updateDevice(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols,
stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef, out_grad,
penalty::NONE, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_lasso,
penalty::L1, alpha, l1_ratio, cublas_handle, allocator,
stream);
updateDevice(in, h_in, len, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio,
cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef, out_ridge,
penalty::L2, alpha, l1_ratio, cublas_handle, allocator,
stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio,
cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
linearRegLoss(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
cublas_handle, allocator, stream);
linearRegLossGrads(in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, cublas_handle, allocator, stream);
updateDevice(in, h_in, len, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
07e6f1d72a7b25c484a599ec36e0fcce3432bd1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
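// Launched with 10 blocks of 1 thread each; every block scales one array element in place.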
__global__ void multiplyNumbersByAScalar(float numbers[], float scalar) {
int x = blockIdx.x;
numbers[x] = numbers[x] * scalar;
}
int main(int argc, char** args)
{
float numbersInSystemMemory[] = { 0, 1, 2 , 3 , 4 , 5 , 6 ,7 ,8 , 9};
float* numbersInDeviceMemory;
hipMalloc( (void**)&numbersInDeviceMemory, sizeof(float) * 10);
hipMemcpy( numbersInDeviceMemory, numbersInSystemMemory, sizeof(float) * 10, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( multiplyNumbersByAScalar), dim3(10),dim3(1), 0, 0, numbersInDeviceMemory, 2.0f);
hipMemcpy( numbersInSystemMemory, numbersInDeviceMemory, sizeof(float) * 10, hipMemcpyDeviceToHost );
hipFree( numbersInDeviceMemory );
for(int x = 0; x < 10 ; x++){
printf("%f ", numbersInSystemMemory[x]);
}
  return 0;
}
| 07e6f1d72a7b25c484a599ec36e0fcce3432bd1e.cu | #include <stdio.h>
__global__ void multiplyNumbersByAScalar(float numbers[], float scalar) {
int x = blockIdx.x;
numbers[x] = numbers[x] * scalar;
}
int main(int argc, char** args)
{
float numbersInSystemMemory[] = { 0, 1, 2 , 3 , 4 , 5 , 6 ,7 ,8 , 9};
float* numbersInDeviceMemory;
cudaMalloc( (void**)&numbersInDeviceMemory, sizeof(float) * 10);
cudaMemcpy( numbersInDeviceMemory, numbersInSystemMemory, sizeof(float) * 10, cudaMemcpyHostToDevice );
multiplyNumbersByAScalar<<<10,1>>>(numbersInDeviceMemory, 2.0f);
cudaMemcpy( numbersInSystemMemory, numbersInDeviceMemory, sizeof(float) * 10, cudaMemcpyDeviceToHost );
cudaFree( numbersInDeviceMemory );
for(int x = 0; x < 10 ; x++){
printf("%f ", numbersInSystemMemory[x]);
}
  return 0;
}
|
6657cfdcb45d1487e7d36ef1c7ac1251487d4feb.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
TEST(TEST_FLEET, graph_comm) {
int gpu_count = 3;
std::vector<int> dev_ids;
dev_ids.push_back(0);
dev_ids.push_back(1);
dev_ids.push_back(2);
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(dev_ids);
resource->enable_p2p();
GpuPsGraphTable g(resource);
int node_count = 10;
std::vector<std::vector<int64_t>> neighbors(node_count);
int ind = 0;
int64_t node_id = 0;
std::vector<GpuPsCommGraph> graph_list(gpu_count);
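  // Node i is assigned to GPU (i % gpu_count) and gets i + 1 neighbors with globally consecutive ids.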
while (ind < node_count) {
int neighbor_size = ind + 1;
graph_list[ind % gpu_count].node_size++;
graph_list[ind % gpu_count].neighbor_size += neighbor_size;
while (neighbor_size--) {
neighbors[ind].push_back(node_id++);
}
ind++;
}
std::vector<int> neighbor_offset(gpu_count, 0), node_index(gpu_count, 0);
for (int i = 0; i < graph_list.size(); i++) {
graph_list[i].node_list = new GpuPsGraphNode[graph_list[i].node_size];
graph_list[i].neighbor_list = new int64_t[graph_list[i].neighbor_size];
}
for (int i = 0; i < node_count; i++) {
ind = i % gpu_count;
graph_list[ind].node_list[node_index[ind]].node_id = i;
graph_list[ind].node_list[node_index[ind]].neighbor_offset =
neighbor_offset[ind];
graph_list[ind].node_list[node_index[ind]].neighbor_size =
neighbors[i].size();
for (auto x : neighbors[i]) {
graph_list[ind].neighbor_list[neighbor_offset[ind]++] = x;
}
node_index[ind]++;
}
g.build_graph_from_cpu(graph_list);
/*
gpu 0:
0,3,6,9
gpu 1:
1,4,7
gpu 2:
2,5,8
query(2,6) returns nodes [6,9,1,4,7,2]
*/
int64_t answer[6] = {6, 9, 1, 4, 7, 2};
int64_t *res = new int64_t[6];
auto query_res = g.query_node_list(0, 2, 6);
hipMemcpy(res, query_res->val, 48, hipMemcpyDeviceToHost);
ASSERT_EQ(query_res->actual_sample_size, 6);
for (int i = 0; i < 6; i++) {
ASSERT_EQ(res[i], answer[i]);
}
delete[] res;
delete query_res;
/*
node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x]
so node 6's neighbors are [21,22...,27]
node 7's neighbors are [28,29,..35]
node 0's neighbors are [0]
query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23]
6 --index-->2
0 --index--->0
7 --index-->2
*/
int64_t cpu_key[3] = {7, 0, 6};
void *key;
hipMalloc((void **)&key, 3 * sizeof(int64_t));
hipMemcpy(key, cpu_key, 3 * sizeof(int64_t), hipMemcpyHostToDevice);
auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
res = new int64_t[7];
hipMemcpy(res, neighbor_sample_res->val, 56, hipMemcpyDeviceToHost);
int *actual_sample_size = new int[3];
hipMemcpy(actual_sample_size, neighbor_sample_res->actual_sample_size, 12,
hipMemcpyDeviceToHost); // 3, 1, 3
int *cumsum_sample_size = new int[3];
hipMemcpy(cumsum_sample_size, neighbor_sample_res->offset, 12,
hipMemcpyDeviceToHost); // 0, 3, 4
std::vector<std::vector<int64_t>> neighbors_;
std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35};
std::vector<int64_t> neighbors_0 = {0};
std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27};
neighbors_.push_back(neighbors_7);
neighbors_.push_back(neighbors_0);
neighbors_.push_back(neighbors_6);
for (int i = 0; i < 3; i++) {
for (int j = cumsum_sample_size[i];
j < cumsum_sample_size[i] + actual_sample_size[i]; j++) {
bool flag = false;
for (int k = 0; k < neighbors_[i].size(); k++) {
if (res[j] == neighbors_[i][k]) {
flag = true;
break;
}
}
ASSERT_EQ(flag, true);
}
}
delete[] res;
delete[] actual_sample_size;
delete[] cumsum_sample_size;
delete neighbor_sample_res;
}
| 6657cfdcb45d1487e7d36ef1c7ac1251487d4feb.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include <vector>
#include "paddle/fluid/framework/fleet/heter_ps/feature_value.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_comm.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
using namespace paddle::framework;
TEST(TEST_FLEET, graph_comm) {
int gpu_count = 3;
std::vector<int> dev_ids;
dev_ids.push_back(0);
dev_ids.push_back(1);
dev_ids.push_back(2);
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(dev_ids);
resource->enable_p2p();
GpuPsGraphTable g(resource);
int node_count = 10;
std::vector<std::vector<int64_t>> neighbors(node_count);
int ind = 0;
int64_t node_id = 0;
std::vector<GpuPsCommGraph> graph_list(gpu_count);
while (ind < node_count) {
int neighbor_size = ind + 1;
graph_list[ind % gpu_count].node_size++;
graph_list[ind % gpu_count].neighbor_size += neighbor_size;
while (neighbor_size--) {
neighbors[ind].push_back(node_id++);
}
ind++;
}
std::vector<int> neighbor_offset(gpu_count, 0), node_index(gpu_count, 0);
for (int i = 0; i < graph_list.size(); i++) {
graph_list[i].node_list = new GpuPsGraphNode[graph_list[i].node_size];
graph_list[i].neighbor_list = new int64_t[graph_list[i].neighbor_size];
}
for (int i = 0; i < node_count; i++) {
ind = i % gpu_count;
graph_list[ind].node_list[node_index[ind]].node_id = i;
graph_list[ind].node_list[node_index[ind]].neighbor_offset =
neighbor_offset[ind];
graph_list[ind].node_list[node_index[ind]].neighbor_size =
neighbors[i].size();
for (auto x : neighbors[i]) {
graph_list[ind].neighbor_list[neighbor_offset[ind]++] = x;
}
node_index[ind]++;
}
g.build_graph_from_cpu(graph_list);
/*
gpu 0:
0,3,6,9
gpu 1:
1,4,7
gpu 2:
2,5,8
query(2,6) returns nodes [6,9,1,4,7,2]
*/
int64_t answer[6] = {6, 9, 1, 4, 7, 2};
int64_t *res = new int64_t[6];
auto query_res = g.query_node_list(0, 2, 6);
cudaMemcpy(res, query_res->val, 48, cudaMemcpyDeviceToHost);
ASSERT_EQ(query_res->actual_sample_size, 6);
for (int i = 0; i < 6; i++) {
ASSERT_EQ(res[i], answer[i]);
}
delete[] res;
delete query_res;
/*
node x's neighbor list = [(1+x)*x/2,(1+x)*x/2 + 1,.....,(1+x)*x/2 + x]
so node 6's neighbors are [21,22...,27]
node 7's neighbors are [28,29,..35]
node 0's neighbors are [0]
query([7,0,6],sample_size=3) should return [28,29,30,0,x,x,21,22,23]
6 --index-->2
0 --index--->0
7 --index-->2
*/
int64_t cpu_key[3] = {7, 0, 6};
void *key;
cudaMalloc((void **)&key, 3 * sizeof(int64_t));
cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice);
auto neighbor_sample_res = g.graph_neighbor_sample(0, (int64_t *)key, 3, 3);
res = new int64_t[7];
cudaMemcpy(res, neighbor_sample_res->val, 56, cudaMemcpyDeviceToHost);
int *actual_sample_size = new int[3];
cudaMemcpy(actual_sample_size, neighbor_sample_res->actual_sample_size, 12,
cudaMemcpyDeviceToHost); // 3, 1, 3
int *cumsum_sample_size = new int[3];
cudaMemcpy(cumsum_sample_size, neighbor_sample_res->offset, 12,
cudaMemcpyDeviceToHost); // 0, 3, 4
std::vector<std::vector<int64_t>> neighbors_;
std::vector<int64_t> neighbors_7 = {28, 29, 30, 31, 32, 33, 34, 35};
std::vector<int64_t> neighbors_0 = {0};
std::vector<int64_t> neighbors_6 = {21, 22, 23, 24, 25, 26, 27};
neighbors_.push_back(neighbors_7);
neighbors_.push_back(neighbors_0);
neighbors_.push_back(neighbors_6);
for (int i = 0; i < 3; i++) {
for (int j = cumsum_sample_size[i];
j < cumsum_sample_size[i] + actual_sample_size[i]; j++) {
bool flag = false;
for (int k = 0; k < neighbors_[i].size(); k++) {
if (res[j] == neighbors_[i][k]) {
flag = true;
break;
}
}
ASSERT_EQ(flag, true);
}
}
delete[] res;
delete[] actual_sample_size;
delete[] cumsum_sample_size;
delete neighbor_sample_res;
}
|
87f15cded9b327f1168b9cf51261e30d40784254.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
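// NOTE: this macro is never expanded; the power-monitor path below is embedded literally inside the std::system() command string.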
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
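// Thread 0 builds per-warp circular pointer chains in global memory (each slot points 48 elements ahead within its warp's region) and mirrors the pointers into my_array.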
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
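// Pointer-chasing load kernel: despite the name it walks the chain through global memory; lanes below 'divergence' issue long runs of dependent loads, and the f1 accumulator plus the final write to duration keep them from being optimized away.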
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
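// Allocates and initializes the pointer-chase arrays, launches init_memory and shared_latency, and reports the kernel time measured with events while an external power monitor samples GPU power.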
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
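/* let init_memory lay out the device-side pointer-chase chain before the timed run */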
hipLaunchKernelGGL(init_memory, dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
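/* start the external NVML power monitor in the background and let it settle for 5 s before timing */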
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_95_5_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(shared_latency, dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 87f15cded9b327f1168b9cf51261e30d40784254.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
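// each warp is assigned its own slice of GLOBAL_MEM_ELEMENTS; within a slice, entry i points 48 elements ahead (modulo the slice size)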
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
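// only the first 'divergence' lanes of each warp execute the measurement loop, giving controlled intra-warp divergence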
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
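// manually unrolled body: every statement loads *tmp_ptr and accumulates it into f1; tmp_ptr is periodically advanced one step along the chase chain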
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
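// the stored value depends on both the pointer chase and f1, so the compiler cannot eliminate the loads or the adds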
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
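/* build a pointer-chase chain: element i holds the address of element (i + 1 + stride) mod N */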
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
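/* init_memory rebuilds the chain on the device: one chase sequence per warp over GLOBAL_MEM_ELEMENTS */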
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
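/* start the external NVML power monitor in the background and let it settle for 5 s before timing */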
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_95_5_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|